From a81716f651ffa291fb56a755ddcaef30be63a906 Mon Sep 17 00:00:00 2001 From: xtqqczze <45661989+xtqqczze@users.noreply.github.com> Date: Sat, 24 Jan 2026 02:29:59 +0000 Subject: [PATCH 01/30] ci: use `macos-15-intel` runner seems unnecessary to use `macos-15-large` just for x86_64 support --- library/stdarch/.github/workflows/main.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index 6cf0e9f02fe54..2b2f64b74b544 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -101,9 +101,9 @@ jobs: # macOS targets - tuple: x86_64-apple-darwin - os: macos-15-large + os: macos-15-intel - tuple: x86_64-apple-ios-macabi - os: macos-15-large + os: macos-15-intel - tuple: aarch64-apple-darwin os: macos-15 - tuple: aarch64-apple-ios-macabi From c10dc0c77f8fb1573794fb88a9934c51e1d6f30b Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Wed, 15 Apr 2026 12:26:33 +0200 Subject: [PATCH 02/30] provide a reminder on the signedness of a failed `assert_simm_bits` --- library/stdarch/crates/core_arch/src/macros.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/macros.rs b/library/stdarch/crates/core_arch/src/macros.rs index 00e92428b3e7e..ee16f5eb693b1 100644 --- a/library/stdarch/crates/core_arch/src/macros.rs +++ b/library/stdarch/crates/core_arch/src/macros.rs @@ -26,7 +26,7 @@ macro_rules! static_assert_uimm_bits { stringify!($imm), " doesn't fit in ", stringify!($bits), - " bits", + " bits (unsigned)", ) ) } @@ -42,7 +42,7 @@ macro_rules! 
static_assert_simm_bits { stringify!($imm), " doesn't fit in ", stringify!($bits), - " bits", + " bits (signed)", ) ) }; From 34f0c8261700738ddc8290a872fcae838b051835 Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Fri, 20 Mar 2026 10:05:34 +0800 Subject: [PATCH 03/30] stdarch-gen-loongarch: Support marking intrinsics as portable --- .../crates/stdarch-gen-loongarch/src/main.rs | 23 ++++++++++++++++++- .../src/portable-intrinsics.txt | 2 ++ 2 files changed, 24 insertions(+), 1 deletion(-) create mode 100644 library/stdarch/crates/stdarch-gen-loongarch/src/portable-intrinsics.txt diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs index 3a946a12d6619..8bdee521a3e86 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs @@ -1,3 +1,4 @@ +use std::collections::HashSet; use std::env; use std::fmt; use std::fs::File; @@ -90,6 +91,14 @@ impl TargetFeature { } } +fn portable_intrinsics() -> HashSet<&'static str> { + include_str!("portable-intrinsics.txt") + .lines() + .map(str::trim) + .filter(|line| !line.is_empty() && !line.starts_with('#')) + .collect() +} + fn gen_spec(in_file: String, ext_name: &str) -> io::Result<()> { let f = File::open(in_file.clone()).unwrap_or_else(|_| panic!("Failed to open {in_file}")); let f = BufReader::new(f); @@ -105,6 +114,7 @@ fn gen_spec(in_file: String, ext_name: &str) -> io::Result<()> { let mut asm_fmts = String::new(); let mut data_types = String::new(); let fn_pat = format!("__{ext_name}_"); + let portable_intrinsics = portable_intrinsics(); for line in f.lines() { let line = line.unwrap(); if line.is_empty() { @@ -121,6 +131,9 @@ fn gen_spec(in_file: String, ext_name: &str) -> io::Result<()> { let e = line.find('(').unwrap(); let name = line.get(s + 2..e).unwrap().trim().to_string(); out.push_str(&format!("/// {name}\n")); + if portable_intrinsics.contains(name.as_str()) { + 
out.push_str("impl = portable\n"); + } out.push_str(&format!("name = {name}\n")); out.push_str(&format!("asm-fmts = {asm_fmts}\n")); out.push_str(&format!("data-types = {data_types}\n")); @@ -146,6 +159,7 @@ fn gen_bind(in_file: String, ext_name: &str) -> io::Result<()> { let mut link_function_str = String::new(); let mut function_str = String::new(); let mut out = String::new(); + let mut skip = false; out.push_str(&format!( r#"// This code is automatically generated. DO NOT MODIFY. @@ -173,7 +187,9 @@ unsafe extern "unadjusted" { if line.is_empty() { continue; } - if let Some(name) = line.strip_prefix("name = ") { + if line.starts_with("impl = portable") { + skip = true; + } else if let Some(name) = line.strip_prefix("name = ") { current_name = Some(String::from(name)); } else if line.starts_with("asm-fmts = ") { asm_fmts = line[10..] @@ -210,6 +226,11 @@ unsafe extern "unadjusted" { panic!("DEBUG: line: {0} len: {1}", line, data_types.len()); } + if skip { + skip = false; + continue; + } + let (link_function, function) = gen_bind_body(¤t_name, &asm_fmts, &in_t, out_t, para_num, target); link_function_str.push_str(&link_function); diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/portable-intrinsics.txt b/library/stdarch/crates/stdarch-gen-loongarch/src/portable-intrinsics.txt new file mode 100644 index 0000000000000..b62f7f5f8f05d --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-loongarch/src/portable-intrinsics.txt @@ -0,0 +1,2 @@ +# LSX intrinsics +# LASX intrinsics From 09615f33cc6e8f37e12629e584c6cbcf8d0b2c9f Mon Sep 17 00:00:00 2001 From: Brian Cain Date: Wed, 8 Apr 2026 09:52:17 -0700 Subject: [PATCH 04/30] hexagon: Add scalar DSP intrinsics --- library/stdarch/Cargo.lock | 150 +- .../crates/core_arch/src/hexagon/mod.rs | 15 +- .../crates/core_arch/src/hexagon/scalar.rs | 12181 ++++++++++++++++ .../stdarch-gen-hexagon-scalar/Cargo.toml | 9 + .../hexagon_protos.h | 8439 +++++++++++ .../stdarch-gen-hexagon-scalar/src/main.rs | 672 + 
library/stdarch/examples/gaussian.rs | 7 +- 7 files changed, 21392 insertions(+), 81 deletions(-) create mode 100644 library/stdarch/crates/core_arch/src/hexagon/scalar.rs create mode 100644 library/stdarch/crates/stdarch-gen-hexagon-scalar/Cargo.toml create mode 100644 library/stdarch/crates/stdarch-gen-hexagon-scalar/hexagon_protos.h create mode 100644 library/stdarch/crates/stdarch-gen-hexagon-scalar/src/main.rs diff --git a/library/stdarch/Cargo.lock b/library/stdarch/Cargo.lock index 7e7cb592889a8..a1c31fa9f0cea 100644 --- a/library/stdarch/Cargo.lock +++ b/library/stdarch/Cargo.lock @@ -13,9 +13,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.21" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" +checksum = "824a212faf96e9acacdbd09febd34438f8f711fb84e09a8916013cd7815ca28d" dependencies = [ "anstyle", "anstyle-parse", @@ -28,15 +28,15 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.13" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" +checksum = "940b3a0ca603d1eade50a4846a2afffd5ef57a9feac2c0e2ec2e14f9ead76000" [[package]] name = "anstyle-parse" -version = "0.2.7" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" +checksum = "52ce7f38b242319f7cabaa6813055467063ecdc9d355bbb4ce0c68908cd8130e" dependencies = [ "utf8parse", ] @@ -63,9 +63,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.101" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] name = "assert-instr-macro" @@ 
-84,15 +84,15 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "bitflags" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" [[package]] name = "cc" -version = "1.2.55" +version = "1.2.59" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29" +checksum = "b7a4d3ec6524d28a329fc53654bbadc9bdd7b0431f5d65f1a56ffb28a1ee5283" dependencies = [ "find-msvc-tools", "shlex", @@ -106,9 +106,9 @@ checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "clap" -version = "4.5.58" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "63be97961acde393029492ce0be7a1af7e323e6bae9511ebfac33751be5e6806" +checksum = "b193af5b67834b676abd72466a96c1024e6a6ad978a1f484bd90b85c94041351" dependencies = [ "clap_builder", "clap_derive", @@ -116,9 +116,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.58" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f13174bda5dfd69d7e947827e5af4b0f2f94a4a3ee92912fba07a66150f21e2" +checksum = "714a53001bf66416adb0e2ef5ac857140e7dc3a0c48fb28b2f10762fc4b5069f" dependencies = [ "anstream", "anstyle", @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.55" +version = "4.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" +checksum = "1110bd8a634a1ab8cb04345d8d878267d57c3cf1b38d91b71af6686408bbca6a" dependencies = [ "heck", "proc-macro2", @@ -140,15 +140,15 @@ dependencies = [ [[package]] name = "clap_lex" -version = "1.0.0" +version = 
"1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" +checksum = "c8d4a3bb8b1e0c1050499d1815f5ab16d04f0959b233085fb31653fbfc9d98f9" [[package]] name = "colorchoice" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" +checksum = "1d07550c9036bf2ae0c684c4297d503f838287c83c53686d05370d0e139ae570" [[package]] name = "core_arch" @@ -185,9 +185,9 @@ checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "darling" -version = "0.21.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d" dependencies = [ "darling_core", "darling_macro", @@ -195,11 +195,10 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.21.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0" dependencies = [ - "fnv", "ident_case", "proc-macro2", "quote", @@ -209,9 +208,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.21.3" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ "darling_core", "quote", @@ -232,9 +231,9 @@ checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "env_filter" -version = "1.0.0" +version = "1.0.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" +checksum = "32e90c2accc4b07a8456ea0debdc2e7587bdd890680d71173a15d4ae604f6eef" dependencies = [ "log", "regex", @@ -255,9 +254,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.9" +version = "0.11.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" +checksum = "0621c04f2196ac3f488dd583365b9c09be011a4ab8b9f37248ffcc8f6198b56a" dependencies = [ "env_filter", "log", @@ -275,12 +274,6 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" -[[package]] -name = "fnv" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" - [[package]] name = "foldhash" version = "0.1.5" @@ -300,9 +293,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +checksum = "0de51e6874e94e7bf76d726fc5d13ba782deca734ff60d5bb2fb2607c7406555" dependencies = [ "cfg-if", "libc", @@ -375,9 +368,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.13.0" +version = "2.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +checksum = "45a8a2b9cb3e0b0c1803dbb0758ffac5de2f425b23c28f518faabd9d805342ff" dependencies = [ "equivalent", "hashbrown 0.16.1", @@ -430,9 +423,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.17" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" +checksum = "8f42a60cbdf9a97f5d2305f08a87dc4e09308d1276d28c869c684d7777685682" [[package]] name = "leb128fmt" @@ -442,9 +435,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.181" +version = "0.2.184" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459427e2af2b9c839b132acb702a1c654d95e10f8c326bfc2ad11310e458b1c5" +checksum = "48f5d2a454e16a5ea0f4ced81bd44e4cfc7bd3a507b61887c99fd3538b28e4af" [[package]] name = "linked-hash-map" @@ -534,25 +527,25 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95c589f335db0f6aaa168a7cd27b1fc6920f5e1470c804f814d9cd6e62a0f70b" dependencies = [ - "env_logger 0.11.9", + "env_logger 0.11.10", "log", "rand 0.10.0", ] [[package]] name = "quote" -version = "1.0.44" +version = "1.0.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" +checksum = "41f2619966050689382d2b44f664f4bc593e129785a36d6ee376ddf37259b924" dependencies = [ "proc-macro2", ] [[package]] name = "r-efi" -version = "5.3.0" +version = "6.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" +checksum = "f8dcc9c7d52a811697d2151c701e0d08956f92b0e24136cf4cf27b57a6a0d9bf" [[package]] name = "rand" @@ -571,7 +564,7 @@ version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" dependencies = [ - "getrandom 0.4.1", + "getrandom 0.4.2", "rand_core 0.10.0", ] @@ -645,9 +638,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.9" +version = "0.8.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" +checksum = "dc897dd8d9e8bd1ed8cdad82b5966c3e0ecae09fb1907d58efaa013543185d0a" [[package]] name = "rustc-demangle" @@ -672,9 +665,9 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.27" +version = "1.0.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" +checksum = "8a7852d02fc848982e0c167ef163aaff9cd91dc640ba85e263cb1ce46fae51cd" [[package]] name = "serde" @@ -733,9 +726,9 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.16.1" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" +checksum = "dd5414fad8e6907dbdd5bc441a50ae8d6e26151a03b1de04d89a5576de61d01f" dependencies = [ "serde_core", "serde_with_macros", @@ -743,9 +736,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.16.1" +version = "3.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" +checksum = "d3db8978e608f1fe7357e211969fd9abdcae80bac1ba7a3369bb7eb6b404eb65" dependencies = [ "darling", "proc-macro2", @@ -801,6 +794,13 @@ dependencies = [ "regex", ] +[[package]] +name = "stdarch-gen-hexagon-scalar" +version = "0.1.0" +dependencies = [ + "regex", +] + [[package]] name = "stdarch-gen-loongarch" version = "0.1.0" @@ -849,9 +849,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "syn" -version = "2.0.115" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e614ed320ac28113fa64972c4262d5dbc89deacdfd00c34a3e4cea073243c12" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -895,9 +895,9 @@ dependencies = [ [[package]] name 
= "unicode-ident" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-xid" @@ -962,7 +962,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" dependencies = [ "anyhow", - "indexmap 2.13.0", + "indexmap 2.13.1", "wasm-encoder", "wasmparser 0.244.0", ] @@ -974,7 +974,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "161296c618fa2d63f6ed5fffd1112937e803cb9ec71b32b01a76321555660917" dependencies = [ "bitflags", - "indexmap 2.13.0", + "indexmap 2.13.1", "semver", ] @@ -986,7 +986,7 @@ checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ "bitflags", "hashbrown 0.15.5", - "indexmap 2.13.0", + "indexmap 2.13.1", "semver", ] @@ -1053,7 +1053,7 @@ checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" dependencies = [ "anyhow", "heck", - "indexmap 2.13.0", + "indexmap 2.13.1", "prettyplease", "syn", "wasm-metadata", @@ -1084,7 +1084,7 @@ checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ "anyhow", "bitflags", - "indexmap 2.13.0", + "indexmap 2.13.1", "log", "serde", "serde_derive", @@ -1103,7 +1103,7 @@ checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" dependencies = [ "anyhow", "id-arena", - "indexmap 2.13.0", + "indexmap 2.13.1", "log", "semver", "serde", @@ -1130,18 +1130,18 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.39" +version = "0.8.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" +checksum = 
"eed437bf9d6692032087e337407a86f04cd8d6a16a37199ed57949d415bd68e9" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.39" +version = "0.8.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +checksum = "70e3cd084b1788766f53af483dd21f93881ff30d7320490ec3ef7526d203bad4" dependencies = [ "proc-macro2", "quote", diff --git a/library/stdarch/crates/core_arch/src/hexagon/mod.rs b/library/stdarch/crates/core_arch/src/hexagon/mod.rs index c370f3da15dfb..c973f7dc62e1a 100644 --- a/library/stdarch/crates/core_arch/src/hexagon/mod.rs +++ b/library/stdarch/crates/core_arch/src/hexagon/mod.rs @@ -1,13 +1,18 @@ //! Hexagon architecture intrinsics //! //! This module contains intrinsics for the Qualcomm Hexagon DSP architecture, -//! including the Hexagon Vector Extensions (HVX). +//! including scalar operations and the Hexagon Vector Extensions (HVX). +//! +//! ## Scalar Intrinsics +//! +//! The [`scalar`] module provides intrinsics for scalar DSP operations including +//! arithmetic, multiply, shift, saturate, compare, and floating-point operations. +//! +//! ## HVX Vector Intrinsics //! //! HVX is a wide SIMD architecture designed for high-performance signal processing, //! machine learning, and image processing workloads. //! -//! ## Vector Length Modes -//! //! HVX supports two vector length modes: //! - 64-byte mode (512-bit vectors): Use the [`v64`] module //! - 128-byte mode (1024-bit vectors): Use the [`v128`] module @@ -20,6 +25,10 @@ //! Note that HVX v66 and later default to 128-byte mode, while earlier versions //! (v60-v65) default to 64-byte mode. 
+/// Scalar intrinsics for Hexagon DSP operations +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub mod scalar; + /// HVX intrinsics for 64-byte vector mode (512-bit vectors) #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub mod v64; diff --git a/library/stdarch/crates/core_arch/src/hexagon/scalar.rs b/library/stdarch/crates/core_arch/src/hexagon/scalar.rs new file mode 100644 index 0000000000000..c906ec5166a19 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/hexagon/scalar.rs @@ -0,0 +1,12181 @@ +//! Hexagon scalar intrinsics +//! +//! This module provides intrinsics for scalar (non-HVX) Hexagon DSP operations, +//! including arithmetic, multiply, shift, saturate, compare, and floating-point +//! operations. +//! +//! [Hexagon V68 Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-45) +//! +//! ## Naming Convention +//! +//! Function names preserve the original Q6 naming case because the convention +//! uses case to distinguish register types: +//! - `P` (uppercase) = 64-bit register pair (`Word64`) +//! - `p` (lowercase) = predicate register (`Byte`) +//! +//! For example, `Q6_P_and_PP` operates on 64-bit pairs while `Q6_p_and_pp` +//! operates on predicate registers. +//! +//! ## Architecture Versions +//! +//! Most scalar intrinsics are available on all Hexagon architectures. +//! Some intrinsics require specific architecture versions (v60, v62, v65, +//! v66, v67, v68, or v67+audio) and carry +//! `#[target_feature(enable = "v68")]` (or the appropriate version). +//! Enable these with `-C target-feature=+v68` or by setting the target CPU +//! via `-C target-cpu=hexagonv68`. +//! +//! Each version includes all features from previous versions. 
+ +#![allow(non_snake_case)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +// LLVM intrinsic declarations for Hexagon scalar operations +#[allow(improper_ctypes)] +unsafe extern "unadjusted" { + #[link_name = "llvm.hexagon.A2.abs"] + fn hexagon_A2_abs(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.absp"] + fn hexagon_A2_absp(_: i64) -> i64; + #[link_name = "llvm.hexagon.A2.abssat"] + fn hexagon_A2_abssat(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.add"] + fn hexagon_A2_add(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.h16.hh"] + fn hexagon_A2_addh_h16_hh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.h16.hl"] + fn hexagon_A2_addh_h16_hl(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.h16.lh"] + fn hexagon_A2_addh_h16_lh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.h16.ll"] + fn hexagon_A2_addh_h16_ll(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.h16.sat.hh"] + fn hexagon_A2_addh_h16_sat_hh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.h16.sat.hl"] + fn hexagon_A2_addh_h16_sat_hl(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.h16.sat.lh"] + fn hexagon_A2_addh_h16_sat_lh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.h16.sat.ll"] + fn hexagon_A2_addh_h16_sat_ll(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.l16.hl"] + fn hexagon_A2_addh_l16_hl(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.l16.ll"] + fn hexagon_A2_addh_l16_ll(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.l16.sat.hl"] + fn hexagon_A2_addh_l16_sat_hl(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addh.l16.sat.ll"] + fn hexagon_A2_addh_l16_sat_ll(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addi"] + fn hexagon_A2_addi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addp"] + fn hexagon_A2_addp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.addpsat"] + fn hexagon_A2_addpsat(_: i64, _: 
i64) -> i64; + #[link_name = "llvm.hexagon.A2.addsat"] + fn hexagon_A2_addsat(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.addsp"] + fn hexagon_A2_addsp(_: i32, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.and"] + fn hexagon_A2_and(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.andir"] + fn hexagon_A2_andir(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.andp"] + fn hexagon_A2_andp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.aslh"] + fn hexagon_A2_aslh(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.asrh"] + fn hexagon_A2_asrh(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.combine.hh"] + fn hexagon_A2_combine_hh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.combine.hl"] + fn hexagon_A2_combine_hl(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.combine.lh"] + fn hexagon_A2_combine_lh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.combine.ll"] + fn hexagon_A2_combine_ll(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.combineii"] + fn hexagon_A2_combineii(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.A2.combinew"] + fn hexagon_A2_combinew(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.A2.max"] + fn hexagon_A2_max(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.maxp"] + fn hexagon_A2_maxp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.maxu"] + fn hexagon_A2_maxu(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.maxup"] + fn hexagon_A2_maxup(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.min"] + fn hexagon_A2_min(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.minp"] + fn hexagon_A2_minp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.minu"] + fn hexagon_A2_minu(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.minup"] + fn hexagon_A2_minup(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.neg"] + fn hexagon_A2_neg(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.negp"] + fn hexagon_A2_negp(_: i64) 
-> i64; + #[link_name = "llvm.hexagon.A2.negsat"] + fn hexagon_A2_negsat(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.not"] + fn hexagon_A2_not(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.notp"] + fn hexagon_A2_notp(_: i64) -> i64; + #[link_name = "llvm.hexagon.A2.or"] + fn hexagon_A2_or(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.orir"] + fn hexagon_A2_orir(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.orp"] + fn hexagon_A2_orp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.roundsat"] + fn hexagon_A2_roundsat(_: i64) -> i32; + #[link_name = "llvm.hexagon.A2.sat"] + fn hexagon_A2_sat(_: i64) -> i32; + #[link_name = "llvm.hexagon.A2.satb"] + fn hexagon_A2_satb(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.sath"] + fn hexagon_A2_sath(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.satub"] + fn hexagon_A2_satub(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.satuh"] + fn hexagon_A2_satuh(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.sub"] + fn hexagon_A2_sub(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.h16.hh"] + fn hexagon_A2_subh_h16_hh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.h16.hl"] + fn hexagon_A2_subh_h16_hl(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.h16.lh"] + fn hexagon_A2_subh_h16_lh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.h16.ll"] + fn hexagon_A2_subh_h16_ll(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.h16.sat.hh"] + fn hexagon_A2_subh_h16_sat_hh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.h16.sat.hl"] + fn hexagon_A2_subh_h16_sat_hl(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.h16.sat.lh"] + fn hexagon_A2_subh_h16_sat_lh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.h16.sat.ll"] + fn hexagon_A2_subh_h16_sat_ll(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.l16.hl"] + fn hexagon_A2_subh_l16_hl(_: i32, _: i32) -> i32; + #[link_name = 
"llvm.hexagon.A2.subh.l16.ll"] + fn hexagon_A2_subh_l16_ll(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.l16.sat.hl"] + fn hexagon_A2_subh_l16_sat_hl(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subh.l16.sat.ll"] + fn hexagon_A2_subh_l16_sat_ll(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subp"] + fn hexagon_A2_subp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.subri"] + fn hexagon_A2_subri(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.subsat"] + fn hexagon_A2_subsat(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.svaddh"] + fn hexagon_A2_svaddh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.svaddhs"] + fn hexagon_A2_svaddhs(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.svadduhs"] + fn hexagon_A2_svadduhs(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.svavgh"] + fn hexagon_A2_svavgh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.svavghs"] + fn hexagon_A2_svavghs(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.svnavgh"] + fn hexagon_A2_svnavgh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.svsubh"] + fn hexagon_A2_svsubh(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.svsubhs"] + fn hexagon_A2_svsubhs(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.svsubuhs"] + fn hexagon_A2_svsubuhs(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.swiz"] + fn hexagon_A2_swiz(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.sxtb"] + fn hexagon_A2_sxtb(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.sxth"] + fn hexagon_A2_sxth(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.sxtw"] + fn hexagon_A2_sxtw(_: i32) -> i64; + #[link_name = "llvm.hexagon.A2.tfr"] + fn hexagon_A2_tfr(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.tfrih"] + fn hexagon_A2_tfrih(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.tfril"] + fn hexagon_A2_tfril(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.tfrp"] + fn hexagon_A2_tfrp(_: 
i64) -> i64; + #[link_name = "llvm.hexagon.A2.tfrpi"] + fn hexagon_A2_tfrpi(_: i32) -> i64; + #[link_name = "llvm.hexagon.A2.tfrsi"] + fn hexagon_A2_tfrsi(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.vabsh"] + fn hexagon_A2_vabsh(_: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vabshsat"] + fn hexagon_A2_vabshsat(_: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vabsw"] + fn hexagon_A2_vabsw(_: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vabswsat"] + fn hexagon_A2_vabswsat(_: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vaddb.map"] + fn hexagon_A2_vaddb_map(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vaddh"] + fn hexagon_A2_vaddh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vaddhs"] + fn hexagon_A2_vaddhs(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vaddub"] + fn hexagon_A2_vaddub(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vaddubs"] + fn hexagon_A2_vaddubs(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vadduhs"] + fn hexagon_A2_vadduhs(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vaddw"] + fn hexagon_A2_vaddw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vaddws"] + fn hexagon_A2_vaddws(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavgh"] + fn hexagon_A2_vavgh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavghcr"] + fn hexagon_A2_vavghcr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavghr"] + fn hexagon_A2_vavghr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavgub"] + fn hexagon_A2_vavgub(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavgubr"] + fn hexagon_A2_vavgubr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavguh"] + fn hexagon_A2_vavguh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavguhr"] + fn hexagon_A2_vavguhr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavguw"] + fn hexagon_A2_vavguw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavguwr"] + fn 
hexagon_A2_vavguwr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavgw"] + fn hexagon_A2_vavgw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavgwcr"] + fn hexagon_A2_vavgwcr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vavgwr"] + fn hexagon_A2_vavgwr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vcmpbeq"] + fn hexagon_A2_vcmpbeq(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A2.vcmpbgtu"] + fn hexagon_A2_vcmpbgtu(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A2.vcmpheq"] + fn hexagon_A2_vcmpheq(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A2.vcmphgt"] + fn hexagon_A2_vcmphgt(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A2.vcmphgtu"] + fn hexagon_A2_vcmphgtu(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A2.vcmpweq"] + fn hexagon_A2_vcmpweq(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A2.vcmpwgt"] + fn hexagon_A2_vcmpwgt(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A2.vcmpwgtu"] + fn hexagon_A2_vcmpwgtu(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A2.vconj"] + fn hexagon_A2_vconj(_: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vmaxb"] + fn hexagon_A2_vmaxb(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vmaxh"] + fn hexagon_A2_vmaxh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vmaxub"] + fn hexagon_A2_vmaxub(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vmaxuh"] + fn hexagon_A2_vmaxuh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vmaxuw"] + fn hexagon_A2_vmaxuw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vmaxw"] + fn hexagon_A2_vmaxw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vminb"] + fn hexagon_A2_vminb(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vminh"] + fn hexagon_A2_vminh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vminub"] + fn hexagon_A2_vminub(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vminuh"] + fn hexagon_A2_vminuh(_: i64, _: i64) -> 
i64; + #[link_name = "llvm.hexagon.A2.vminuw"] + fn hexagon_A2_vminuw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vminw"] + fn hexagon_A2_vminw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vnavgh"] + fn hexagon_A2_vnavgh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vnavghcr"] + fn hexagon_A2_vnavghcr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vnavghr"] + fn hexagon_A2_vnavghr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vnavgw"] + fn hexagon_A2_vnavgw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vnavgwcr"] + fn hexagon_A2_vnavgwcr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vnavgwr"] + fn hexagon_A2_vnavgwr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vraddub"] + fn hexagon_A2_vraddub(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vraddub.acc"] + fn hexagon_A2_vraddub_acc(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vrsadub"] + fn hexagon_A2_vrsadub(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vrsadub.acc"] + fn hexagon_A2_vrsadub_acc(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vsubb.map"] + fn hexagon_A2_vsubb_map(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vsubh"] + fn hexagon_A2_vsubh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vsubhs"] + fn hexagon_A2_vsubhs(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vsubub"] + fn hexagon_A2_vsubub(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vsububs"] + fn hexagon_A2_vsububs(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vsubuhs"] + fn hexagon_A2_vsubuhs(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vsubw"] + fn hexagon_A2_vsubw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.vsubws"] + fn hexagon_A2_vsubws(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A2.xor"] + fn hexagon_A2_xor(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A2.xorp"] + fn hexagon_A2_xorp(_: i64, _: 
i64) -> i64; + #[link_name = "llvm.hexagon.A2.zxtb"] + fn hexagon_A2_zxtb(_: i32) -> i32; + #[link_name = "llvm.hexagon.A2.zxth"] + fn hexagon_A2_zxth(_: i32) -> i32; + #[link_name = "llvm.hexagon.A4.andn"] + fn hexagon_A4_andn(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.andnp"] + fn hexagon_A4_andnp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A4.bitsplit"] + fn hexagon_A4_bitsplit(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.bitspliti"] + fn hexagon_A4_bitspliti(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.boundscheck"] + fn hexagon_A4_boundscheck(_: i32, _: i64) -> i32; + #[link_name = "llvm.hexagon.A4.cmpbeq"] + fn hexagon_A4_cmpbeq(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmpbeqi"] + fn hexagon_A4_cmpbeqi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmpbgt"] + fn hexagon_A4_cmpbgt(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmpbgti"] + fn hexagon_A4_cmpbgti(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmpbgtu"] + fn hexagon_A4_cmpbgtu(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmpbgtui"] + fn hexagon_A4_cmpbgtui(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmpheq"] + fn hexagon_A4_cmpheq(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmpheqi"] + fn hexagon_A4_cmpheqi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmphgt"] + fn hexagon_A4_cmphgt(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmphgti"] + fn hexagon_A4_cmphgti(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmphgtu"] + fn hexagon_A4_cmphgtu(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.cmphgtui"] + fn hexagon_A4_cmphgtui(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.combineir"] + fn hexagon_A4_combineir(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.combineri"] + fn hexagon_A4_combineri(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.cround.ri"] + fn hexagon_A4_cround_ri(_: i32, _: i32) -> i32; 
+ #[link_name = "llvm.hexagon.A4.cround.rr"] + fn hexagon_A4_cround_rr(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.modwrapu"] + fn hexagon_A4_modwrapu(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.orn"] + fn hexagon_A4_orn(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.ornp"] + fn hexagon_A4_ornp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A4.rcmpeq"] + fn hexagon_A4_rcmpeq(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.rcmpeqi"] + fn hexagon_A4_rcmpeqi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.rcmpneq"] + fn hexagon_A4_rcmpneq(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.rcmpneqi"] + fn hexagon_A4_rcmpneqi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.round.ri"] + fn hexagon_A4_round_ri(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.round.ri.sat"] + fn hexagon_A4_round_ri_sat(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.round.rr"] + fn hexagon_A4_round_rr(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.round.rr.sat"] + fn hexagon_A4_round_rr_sat(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.tlbmatch"] + fn hexagon_A4_tlbmatch(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vcmpbeq.any"] + fn hexagon_A4_vcmpbeq_any(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A4.vcmpbeqi"] + fn hexagon_A4_vcmpbeqi(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vcmpbgt"] + fn hexagon_A4_vcmpbgt(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.A4.vcmpbgti"] + fn hexagon_A4_vcmpbgti(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vcmpbgtui"] + fn hexagon_A4_vcmpbgtui(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vcmpheqi"] + fn hexagon_A4_vcmpheqi(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vcmphgti"] + fn hexagon_A4_vcmphgti(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vcmphgtui"] + fn hexagon_A4_vcmphgtui(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vcmpweqi"] + 
fn hexagon_A4_vcmpweqi(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vcmpwgti"] + fn hexagon_A4_vcmpwgti(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vcmpwgtui"] + fn hexagon_A4_vcmpwgtui(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.A4.vrmaxh"] + fn hexagon_A4_vrmaxh(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.vrmaxuh"] + fn hexagon_A4_vrmaxuh(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.vrmaxuw"] + fn hexagon_A4_vrmaxuw(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.vrmaxw"] + fn hexagon_A4_vrmaxw(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.vrminh"] + fn hexagon_A4_vrminh(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.vrminuh"] + fn hexagon_A4_vrminuh(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.vrminuw"] + fn hexagon_A4_vrminuw(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A4.vrminw"] + fn hexagon_A4_vrminw(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A5.vaddhubs"] + fn hexagon_A5_vaddhubs(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.C2.all8"] + fn hexagon_C2_all8(_: i32) -> i32; + #[link_name = "llvm.hexagon.C2.and"] + fn hexagon_C2_and(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.andn"] + fn hexagon_C2_andn(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.any8"] + fn hexagon_C2_any8(_: i32) -> i32; + #[link_name = "llvm.hexagon.C2.bitsclr"] + fn hexagon_C2_bitsclr(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.bitsclri"] + fn hexagon_C2_bitsclri(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.bitsset"] + fn hexagon_C2_bitsset(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpeq"] + fn hexagon_C2_cmpeq(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpeqi"] + fn hexagon_C2_cmpeqi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpeqp"] + fn hexagon_C2_cmpeqp(_: i64, _: i64) -> i32; + #[link_name = 
"llvm.hexagon.C2.cmpgei"] + fn hexagon_C2_cmpgei(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpgeui"] + fn hexagon_C2_cmpgeui(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpgt"] + fn hexagon_C2_cmpgt(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpgti"] + fn hexagon_C2_cmpgti(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpgtp"] + fn hexagon_C2_cmpgtp(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.C2.cmpgtu"] + fn hexagon_C2_cmpgtu(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpgtui"] + fn hexagon_C2_cmpgtui(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpgtup"] + fn hexagon_C2_cmpgtup(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.C2.cmplt"] + fn hexagon_C2_cmplt(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.cmpltu"] + fn hexagon_C2_cmpltu(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.mask"] + fn hexagon_C2_mask(_: i32) -> i64; + #[link_name = "llvm.hexagon.C2.mux"] + fn hexagon_C2_mux(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.muxii"] + fn hexagon_C2_muxii(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.muxir"] + fn hexagon_C2_muxir(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.muxri"] + fn hexagon_C2_muxri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.not"] + fn hexagon_C2_not(_: i32) -> i32; + #[link_name = "llvm.hexagon.C2.or"] + fn hexagon_C2_or(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.orn"] + fn hexagon_C2_orn(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.pxfer.map"] + fn hexagon_C2_pxfer_map(_: i32) -> i32; + #[link_name = "llvm.hexagon.C2.tfrpr"] + fn hexagon_C2_tfrpr(_: i32) -> i32; + #[link_name = "llvm.hexagon.C2.tfrrp"] + fn hexagon_C2_tfrrp(_: i32) -> i32; + #[link_name = "llvm.hexagon.C2.vitpack"] + fn hexagon_C2_vitpack(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C2.vmux"] + fn hexagon_C2_vmux(_: i32, _: i64, _: i64) -> i64; + 
#[link_name = "llvm.hexagon.C2.xor"] + fn hexagon_C2_xor(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.and.and"] + fn hexagon_C4_and_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.and.andn"] + fn hexagon_C4_and_andn(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.and.or"] + fn hexagon_C4_and_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.and.orn"] + fn hexagon_C4_and_orn(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.cmplte"] + fn hexagon_C4_cmplte(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.cmpltei"] + fn hexagon_C4_cmpltei(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.cmplteu"] + fn hexagon_C4_cmplteu(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.cmplteui"] + fn hexagon_C4_cmplteui(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.cmpneq"] + fn hexagon_C4_cmpneq(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.cmpneqi"] + fn hexagon_C4_cmpneqi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.fastcorner9"] + fn hexagon_C4_fastcorner9(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.fastcorner9.not"] + fn hexagon_C4_fastcorner9_not(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.nbitsclr"] + fn hexagon_C4_nbitsclr(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.nbitsclri"] + fn hexagon_C4_nbitsclri(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.nbitsset"] + fn hexagon_C4_nbitsset(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.or.and"] + fn hexagon_C4_or_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.or.andn"] + fn hexagon_C4_or_andn(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.or.or"] + fn hexagon_C4_or_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.C4.or.orn"] + fn hexagon_C4_or_orn(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.F2.conv.d2df"] + fn hexagon_F2_conv_d2df(_: i64) -> f64; + #[link_name = 
"llvm.hexagon.F2.conv.d2sf"] + fn hexagon_F2_conv_d2sf(_: i64) -> f32; + #[link_name = "llvm.hexagon.F2.conv.df2d"] + fn hexagon_F2_conv_df2d(_: f64) -> i64; + #[link_name = "llvm.hexagon.F2.conv.df2d.chop"] + fn hexagon_F2_conv_df2d_chop(_: f64) -> i64; + #[link_name = "llvm.hexagon.F2.conv.df2sf"] + fn hexagon_F2_conv_df2sf(_: f64) -> f32; + #[link_name = "llvm.hexagon.F2.conv.df2ud"] + fn hexagon_F2_conv_df2ud(_: f64) -> i64; + #[link_name = "llvm.hexagon.F2.conv.df2ud.chop"] + fn hexagon_F2_conv_df2ud_chop(_: f64) -> i64; + #[link_name = "llvm.hexagon.F2.conv.df2uw"] + fn hexagon_F2_conv_df2uw(_: f64) -> i32; + #[link_name = "llvm.hexagon.F2.conv.df2uw.chop"] + fn hexagon_F2_conv_df2uw_chop(_: f64) -> i32; + #[link_name = "llvm.hexagon.F2.conv.df2w"] + fn hexagon_F2_conv_df2w(_: f64) -> i32; + #[link_name = "llvm.hexagon.F2.conv.df2w.chop"] + fn hexagon_F2_conv_df2w_chop(_: f64) -> i32; + #[link_name = "llvm.hexagon.F2.conv.sf2d"] + fn hexagon_F2_conv_sf2d(_: f32) -> i64; + #[link_name = "llvm.hexagon.F2.conv.sf2d.chop"] + fn hexagon_F2_conv_sf2d_chop(_: f32) -> i64; + #[link_name = "llvm.hexagon.F2.conv.sf2df"] + fn hexagon_F2_conv_sf2df(_: f32) -> f64; + #[link_name = "llvm.hexagon.F2.conv.sf2ud"] + fn hexagon_F2_conv_sf2ud(_: f32) -> i64; + #[link_name = "llvm.hexagon.F2.conv.sf2ud.chop"] + fn hexagon_F2_conv_sf2ud_chop(_: f32) -> i64; + #[link_name = "llvm.hexagon.F2.conv.sf2uw"] + fn hexagon_F2_conv_sf2uw(_: f32) -> i32; + #[link_name = "llvm.hexagon.F2.conv.sf2uw.chop"] + fn hexagon_F2_conv_sf2uw_chop(_: f32) -> i32; + #[link_name = "llvm.hexagon.F2.conv.sf2w"] + fn hexagon_F2_conv_sf2w(_: f32) -> i32; + #[link_name = "llvm.hexagon.F2.conv.sf2w.chop"] + fn hexagon_F2_conv_sf2w_chop(_: f32) -> i32; + #[link_name = "llvm.hexagon.F2.conv.ud2df"] + fn hexagon_F2_conv_ud2df(_: i64) -> f64; + #[link_name = "llvm.hexagon.F2.conv.ud2sf"] + fn hexagon_F2_conv_ud2sf(_: i64) -> f32; + #[link_name = "llvm.hexagon.F2.conv.uw2df"] + fn hexagon_F2_conv_uw2df(_: i32) -> 
f64; + #[link_name = "llvm.hexagon.F2.conv.uw2sf"] + fn hexagon_F2_conv_uw2sf(_: i32) -> f32; + #[link_name = "llvm.hexagon.F2.conv.w2df"] + fn hexagon_F2_conv_w2df(_: i32) -> f64; + #[link_name = "llvm.hexagon.F2.conv.w2sf"] + fn hexagon_F2_conv_w2sf(_: i32) -> f32; + #[link_name = "llvm.hexagon.F2.dfclass"] + fn hexagon_F2_dfclass(_: f64, _: i32) -> i32; + #[link_name = "llvm.hexagon.F2.dfcmpeq"] + fn hexagon_F2_dfcmpeq(_: f64, _: f64) -> i32; + #[link_name = "llvm.hexagon.F2.dfcmpge"] + fn hexagon_F2_dfcmpge(_: f64, _: f64) -> i32; + #[link_name = "llvm.hexagon.F2.dfcmpgt"] + fn hexagon_F2_dfcmpgt(_: f64, _: f64) -> i32; + #[link_name = "llvm.hexagon.F2.dfcmpuo"] + fn hexagon_F2_dfcmpuo(_: f64, _: f64) -> i32; + #[link_name = "llvm.hexagon.F2.dfimm.n"] + fn hexagon_F2_dfimm_n(_: i32) -> f64; + #[link_name = "llvm.hexagon.F2.dfimm.p"] + fn hexagon_F2_dfimm_p(_: i32) -> f64; + #[link_name = "llvm.hexagon.F2.sfadd"] + fn hexagon_F2_sfadd(_: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sfclass"] + fn hexagon_F2_sfclass(_: f32, _: i32) -> i32; + #[link_name = "llvm.hexagon.F2.sfcmpeq"] + fn hexagon_F2_sfcmpeq(_: f32, _: f32) -> i32; + #[link_name = "llvm.hexagon.F2.sfcmpge"] + fn hexagon_F2_sfcmpge(_: f32, _: f32) -> i32; + #[link_name = "llvm.hexagon.F2.sfcmpgt"] + fn hexagon_F2_sfcmpgt(_: f32, _: f32) -> i32; + #[link_name = "llvm.hexagon.F2.sfcmpuo"] + fn hexagon_F2_sfcmpuo(_: f32, _: f32) -> i32; + #[link_name = "llvm.hexagon.F2.sffixupd"] + fn hexagon_F2_sffixupd(_: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sffixupn"] + fn hexagon_F2_sffixupn(_: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sffixupr"] + fn hexagon_F2_sffixupr(_: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sffma"] + fn hexagon_F2_sffma(_: f32, _: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sffma.lib"] + fn hexagon_F2_sffma_lib(_: f32, _: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sffma.sc"] + fn hexagon_F2_sffma_sc(_: f32, _: f32, _: f32, _: 
i32) -> f32; + #[link_name = "llvm.hexagon.F2.sffms"] + fn hexagon_F2_sffms(_: f32, _: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sffms.lib"] + fn hexagon_F2_sffms_lib(_: f32, _: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sfimm.n"] + fn hexagon_F2_sfimm_n(_: i32) -> f32; + #[link_name = "llvm.hexagon.F2.sfimm.p"] + fn hexagon_F2_sfimm_p(_: i32) -> f32; + #[link_name = "llvm.hexagon.F2.sfmax"] + fn hexagon_F2_sfmax(_: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sfmin"] + fn hexagon_F2_sfmin(_: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sfmpy"] + fn hexagon_F2_sfmpy(_: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.F2.sfsub"] + fn hexagon_F2_sfsub(_: f32, _: f32) -> f32; + #[link_name = "llvm.hexagon.M2.acci"] + fn hexagon_M2_acci(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.accii"] + fn hexagon_M2_accii(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.cmaci.s0"] + fn hexagon_M2_cmaci_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmacr.s0"] + fn hexagon_M2_cmacr_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmacs.s0"] + fn hexagon_M2_cmacs_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmacs.s1"] + fn hexagon_M2_cmacs_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmacsc.s0"] + fn hexagon_M2_cmacsc_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmacsc.s1"] + fn hexagon_M2_cmacsc_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmpyi.s0"] + fn hexagon_M2_cmpyi_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmpyr.s0"] + fn hexagon_M2_cmpyr_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmpyrs.s0"] + fn hexagon_M2_cmpyrs_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.cmpyrs.s1"] + fn hexagon_M2_cmpyrs_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.cmpyrsc.s0"] + fn hexagon_M2_cmpyrsc_s0(_: i32, _: i32) -> i32; + 
#[link_name = "llvm.hexagon.M2.cmpyrsc.s1"] + fn hexagon_M2_cmpyrsc_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.cmpys.s0"] + fn hexagon_M2_cmpys_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmpys.s1"] + fn hexagon_M2_cmpys_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmpysc.s0"] + fn hexagon_M2_cmpysc_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cmpysc.s1"] + fn hexagon_M2_cmpysc_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cnacs.s0"] + fn hexagon_M2_cnacs_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cnacs.s1"] + fn hexagon_M2_cnacs_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cnacsc.s0"] + fn hexagon_M2_cnacsc_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.cnacsc.s1"] + fn hexagon_M2_cnacsc_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.dpmpyss.acc.s0"] + fn hexagon_M2_dpmpyss_acc_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.dpmpyss.nac.s0"] + fn hexagon_M2_dpmpyss_nac_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.dpmpyss.rnd.s0"] + fn hexagon_M2_dpmpyss_rnd_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.dpmpyss.s0"] + fn hexagon_M2_dpmpyss_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.dpmpyuu.acc.s0"] + fn hexagon_M2_dpmpyuu_acc_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.dpmpyuu.nac.s0"] + fn hexagon_M2_dpmpyuu_nac_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.dpmpyuu.s0"] + fn hexagon_M2_dpmpyuu_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.hmmpyh.rs1"] + fn hexagon_M2_hmmpyh_rs1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.hmmpyh.s1"] + fn hexagon_M2_hmmpyh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.hmmpyl.rs1"] + fn hexagon_M2_hmmpyl_rs1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.hmmpyl.s1"] + fn hexagon_M2_hmmpyl_s1(_: i32, 
_: i32) -> i32; + #[link_name = "llvm.hexagon.M2.maci"] + fn hexagon_M2_maci(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.macsin"] + fn hexagon_M2_macsin(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.macsip"] + fn hexagon_M2_macsip(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mmachs.rs0"] + fn hexagon_M2_mmachs_rs0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmachs.rs1"] + fn hexagon_M2_mmachs_rs1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmachs.s0"] + fn hexagon_M2_mmachs_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmachs.s1"] + fn hexagon_M2_mmachs_s1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmacls.rs0"] + fn hexagon_M2_mmacls_rs0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmacls.rs1"] + fn hexagon_M2_mmacls_rs1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmacls.s0"] + fn hexagon_M2_mmacls_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmacls.s1"] + fn hexagon_M2_mmacls_s1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmacuhs.rs0"] + fn hexagon_M2_mmacuhs_rs0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmacuhs.rs1"] + fn hexagon_M2_mmacuhs_rs1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmacuhs.s0"] + fn hexagon_M2_mmacuhs_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmacuhs.s1"] + fn hexagon_M2_mmacuhs_s1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmaculs.rs0"] + fn hexagon_M2_mmaculs_rs0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmaculs.rs1"] + fn hexagon_M2_mmaculs_rs1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmaculs.s0"] + fn hexagon_M2_mmaculs_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmaculs.s1"] + fn hexagon_M2_mmaculs_s1(_: i64, _: i64, _: i64) -> i64; + #[link_name = 
"llvm.hexagon.M2.mmpyh.rs0"] + fn hexagon_M2_mmpyh_rs0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyh.rs1"] + fn hexagon_M2_mmpyh_rs1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyh.s0"] + fn hexagon_M2_mmpyh_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyh.s1"] + fn hexagon_M2_mmpyh_s1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyl.rs0"] + fn hexagon_M2_mmpyl_rs0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyl.rs1"] + fn hexagon_M2_mmpyl_rs1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyl.s0"] + fn hexagon_M2_mmpyl_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyl.s1"] + fn hexagon_M2_mmpyl_s1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyuh.rs0"] + fn hexagon_M2_mmpyuh_rs0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyuh.rs1"] + fn hexagon_M2_mmpyuh_rs1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyuh.s0"] + fn hexagon_M2_mmpyuh_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyuh.s1"] + fn hexagon_M2_mmpyuh_s1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyul.rs0"] + fn hexagon_M2_mmpyul_rs0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyul.rs1"] + fn hexagon_M2_mmpyul_rs1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyul.s0"] + fn hexagon_M2_mmpyul_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mmpyul.s1"] + fn hexagon_M2_mmpyul_s1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.mpy.acc.hh.s0"] + fn hexagon_M2_mpy_acc_hh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.hh.s1"] + fn hexagon_M2_mpy_acc_hh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.hl.s0"] + fn hexagon_M2_mpy_acc_hl_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.hl.s1"] + fn hexagon_M2_mpy_acc_hl_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.lh.s0"] + 
fn hexagon_M2_mpy_acc_lh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.lh.s1"] + fn hexagon_M2_mpy_acc_lh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.ll.s0"] + fn hexagon_M2_mpy_acc_ll_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.ll.s1"] + fn hexagon_M2_mpy_acc_ll_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.sat.hh.s0"] + fn hexagon_M2_mpy_acc_sat_hh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.sat.hh.s1"] + fn hexagon_M2_mpy_acc_sat_hh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.sat.hl.s0"] + fn hexagon_M2_mpy_acc_sat_hl_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.sat.hl.s1"] + fn hexagon_M2_mpy_acc_sat_hl_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.sat.lh.s0"] + fn hexagon_M2_mpy_acc_sat_lh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.sat.lh.s1"] + fn hexagon_M2_mpy_acc_sat_lh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.sat.ll.s0"] + fn hexagon_M2_mpy_acc_sat_ll_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.acc.sat.ll.s1"] + fn hexagon_M2_mpy_acc_sat_ll_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.hh.s0"] + fn hexagon_M2_mpy_hh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.hh.s1"] + fn hexagon_M2_mpy_hh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.hl.s0"] + fn hexagon_M2_mpy_hl_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.hl.s1"] + fn hexagon_M2_mpy_hl_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.lh.s0"] + fn hexagon_M2_mpy_lh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.lh.s1"] + fn hexagon_M2_mpy_lh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.ll.s0"] + fn hexagon_M2_mpy_ll_s0(_: i32, _: 
i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.ll.s1"] + fn hexagon_M2_mpy_ll_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.hh.s0"] + fn hexagon_M2_mpy_nac_hh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.hh.s1"] + fn hexagon_M2_mpy_nac_hh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.hl.s0"] + fn hexagon_M2_mpy_nac_hl_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.hl.s1"] + fn hexagon_M2_mpy_nac_hl_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.lh.s0"] + fn hexagon_M2_mpy_nac_lh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.lh.s1"] + fn hexagon_M2_mpy_nac_lh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.ll.s0"] + fn hexagon_M2_mpy_nac_ll_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.ll.s1"] + fn hexagon_M2_mpy_nac_ll_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.sat.hh.s0"] + fn hexagon_M2_mpy_nac_sat_hh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.sat.hh.s1"] + fn hexagon_M2_mpy_nac_sat_hh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.sat.hl.s0"] + fn hexagon_M2_mpy_nac_sat_hl_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.sat.hl.s1"] + fn hexagon_M2_mpy_nac_sat_hl_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.sat.lh.s0"] + fn hexagon_M2_mpy_nac_sat_lh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.sat.lh.s1"] + fn hexagon_M2_mpy_nac_sat_lh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.sat.ll.s0"] + fn hexagon_M2_mpy_nac_sat_ll_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.nac.sat.ll.s1"] + fn hexagon_M2_mpy_nac_sat_ll_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.rnd.hh.s0"] + 
fn hexagon_M2_mpy_rnd_hh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.rnd.hh.s1"] + fn hexagon_M2_mpy_rnd_hh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.rnd.hl.s0"] + fn hexagon_M2_mpy_rnd_hl_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.rnd.hl.s1"] + fn hexagon_M2_mpy_rnd_hl_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.rnd.lh.s0"] + fn hexagon_M2_mpy_rnd_lh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.rnd.lh.s1"] + fn hexagon_M2_mpy_rnd_lh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.rnd.ll.s0"] + fn hexagon_M2_mpy_rnd_ll_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.rnd.ll.s1"] + fn hexagon_M2_mpy_rnd_ll_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.hh.s0"] + fn hexagon_M2_mpy_sat_hh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.hh.s1"] + fn hexagon_M2_mpy_sat_hh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.hl.s0"] + fn hexagon_M2_mpy_sat_hl_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.hl.s1"] + fn hexagon_M2_mpy_sat_hl_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.lh.s0"] + fn hexagon_M2_mpy_sat_lh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.lh.s1"] + fn hexagon_M2_mpy_sat_lh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.ll.s0"] + fn hexagon_M2_mpy_sat_ll_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.ll.s1"] + fn hexagon_M2_mpy_sat_ll_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.rnd.hh.s0"] + fn hexagon_M2_mpy_sat_rnd_hh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.rnd.hh.s1"] + fn hexagon_M2_mpy_sat_rnd_hh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.rnd.hl.s0"] + fn hexagon_M2_mpy_sat_rnd_hl_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.rnd.hl.s1"] + fn 
hexagon_M2_mpy_sat_rnd_hl_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.rnd.lh.s0"] + fn hexagon_M2_mpy_sat_rnd_lh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.rnd.lh.s1"] + fn hexagon_M2_mpy_sat_rnd_lh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.rnd.ll.s0"] + fn hexagon_M2_mpy_sat_rnd_ll_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.sat.rnd.ll.s1"] + fn hexagon_M2_mpy_sat_rnd_ll_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.up"] + fn hexagon_M2_mpy_up(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.up.s1"] + fn hexagon_M2_mpy_up_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpy.up.s1.sat"] + fn hexagon_M2_mpy_up_s1_sat(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyd.acc.hh.s0"] + fn hexagon_M2_mpyd_acc_hh_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.acc.hh.s1"] + fn hexagon_M2_mpyd_acc_hh_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.acc.hl.s0"] + fn hexagon_M2_mpyd_acc_hl_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.acc.hl.s1"] + fn hexagon_M2_mpyd_acc_hl_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.acc.lh.s0"] + fn hexagon_M2_mpyd_acc_lh_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.acc.lh.s1"] + fn hexagon_M2_mpyd_acc_lh_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.acc.ll.s0"] + fn hexagon_M2_mpyd_acc_ll_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.acc.ll.s1"] + fn hexagon_M2_mpyd_acc_ll_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.hh.s0"] + fn hexagon_M2_mpyd_hh_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.hh.s1"] + fn hexagon_M2_mpyd_hh_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.hl.s0"] + fn hexagon_M2_mpyd_hl_s0(_: i32, _: i32) -> i64; + #[link_name = 
"llvm.hexagon.M2.mpyd.hl.s1"] + fn hexagon_M2_mpyd_hl_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.lh.s0"] + fn hexagon_M2_mpyd_lh_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.lh.s1"] + fn hexagon_M2_mpyd_lh_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.ll.s0"] + fn hexagon_M2_mpyd_ll_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.ll.s1"] + fn hexagon_M2_mpyd_ll_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.nac.hh.s0"] + fn hexagon_M2_mpyd_nac_hh_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.nac.hh.s1"] + fn hexagon_M2_mpyd_nac_hh_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.nac.hl.s0"] + fn hexagon_M2_mpyd_nac_hl_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.nac.hl.s1"] + fn hexagon_M2_mpyd_nac_hl_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.nac.lh.s0"] + fn hexagon_M2_mpyd_nac_lh_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.nac.lh.s1"] + fn hexagon_M2_mpyd_nac_lh_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.nac.ll.s0"] + fn hexagon_M2_mpyd_nac_ll_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.nac.ll.s1"] + fn hexagon_M2_mpyd_nac_ll_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.rnd.hh.s0"] + fn hexagon_M2_mpyd_rnd_hh_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.rnd.hh.s1"] + fn hexagon_M2_mpyd_rnd_hh_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.rnd.hl.s0"] + fn hexagon_M2_mpyd_rnd_hl_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.rnd.hl.s1"] + fn hexagon_M2_mpyd_rnd_hl_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.rnd.lh.s0"] + fn hexagon_M2_mpyd_rnd_lh_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.rnd.lh.s1"] + fn hexagon_M2_mpyd_rnd_lh_s1(_: i32, _: i32) -> 
i64; + #[link_name = "llvm.hexagon.M2.mpyd.rnd.ll.s0"] + fn hexagon_M2_mpyd_rnd_ll_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyd.rnd.ll.s1"] + fn hexagon_M2_mpyd_rnd_ll_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyi"] + fn hexagon_M2_mpyi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpysmi"] + fn hexagon_M2_mpysmi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpysu.up"] + fn hexagon_M2_mpysu_up(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.acc.hh.s0"] + fn hexagon_M2_mpyu_acc_hh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.acc.hh.s1"] + fn hexagon_M2_mpyu_acc_hh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.acc.hl.s0"] + fn hexagon_M2_mpyu_acc_hl_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.acc.hl.s1"] + fn hexagon_M2_mpyu_acc_hl_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.acc.lh.s0"] + fn hexagon_M2_mpyu_acc_lh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.acc.lh.s1"] + fn hexagon_M2_mpyu_acc_lh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.acc.ll.s0"] + fn hexagon_M2_mpyu_acc_ll_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.acc.ll.s1"] + fn hexagon_M2_mpyu_acc_ll_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.hh.s0"] + fn hexagon_M2_mpyu_hh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.hh.s1"] + fn hexagon_M2_mpyu_hh_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.hl.s0"] + fn hexagon_M2_mpyu_hl_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.hl.s1"] + fn hexagon_M2_mpyu_hl_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.lh.s0"] + fn hexagon_M2_mpyu_lh_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.lh.s1"] + fn hexagon_M2_mpyu_lh_s1(_: i32, _: i32) -> i32; + #[link_name = 
"llvm.hexagon.M2.mpyu.ll.s0"] + fn hexagon_M2_mpyu_ll_s0(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.ll.s1"] + fn hexagon_M2_mpyu_ll_s1(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.nac.hh.s0"] + fn hexagon_M2_mpyu_nac_hh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.nac.hh.s1"] + fn hexagon_M2_mpyu_nac_hh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.nac.hl.s0"] + fn hexagon_M2_mpyu_nac_hl_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.nac.hl.s1"] + fn hexagon_M2_mpyu_nac_hl_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.nac.lh.s0"] + fn hexagon_M2_mpyu_nac_lh_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.nac.lh.s1"] + fn hexagon_M2_mpyu_nac_lh_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.nac.ll.s0"] + fn hexagon_M2_mpyu_nac_ll_s0(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.nac.ll.s1"] + fn hexagon_M2_mpyu_nac_ll_s1(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyu.up"] + fn hexagon_M2_mpyu_up(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.mpyud.acc.hh.s0"] + fn hexagon_M2_mpyud_acc_hh_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.acc.hh.s1"] + fn hexagon_M2_mpyud_acc_hh_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.acc.hl.s0"] + fn hexagon_M2_mpyud_acc_hl_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.acc.hl.s1"] + fn hexagon_M2_mpyud_acc_hl_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.acc.lh.s0"] + fn hexagon_M2_mpyud_acc_lh_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.acc.lh.s1"] + fn hexagon_M2_mpyud_acc_lh_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.acc.ll.s0"] + fn hexagon_M2_mpyud_acc_ll_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = 
"llvm.hexagon.M2.mpyud.acc.ll.s1"] + fn hexagon_M2_mpyud_acc_ll_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.hh.s0"] + fn hexagon_M2_mpyud_hh_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.hh.s1"] + fn hexagon_M2_mpyud_hh_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.hl.s0"] + fn hexagon_M2_mpyud_hl_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.hl.s1"] + fn hexagon_M2_mpyud_hl_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.lh.s0"] + fn hexagon_M2_mpyud_lh_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.lh.s1"] + fn hexagon_M2_mpyud_lh_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.ll.s0"] + fn hexagon_M2_mpyud_ll_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.ll.s1"] + fn hexagon_M2_mpyud_ll_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.nac.hh.s0"] + fn hexagon_M2_mpyud_nac_hh_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.nac.hh.s1"] + fn hexagon_M2_mpyud_nac_hh_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.nac.hl.s0"] + fn hexagon_M2_mpyud_nac_hl_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.nac.hl.s1"] + fn hexagon_M2_mpyud_nac_hl_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.nac.lh.s0"] + fn hexagon_M2_mpyud_nac_lh_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.nac.lh.s1"] + fn hexagon_M2_mpyud_nac_lh_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.nac.ll.s0"] + fn hexagon_M2_mpyud_nac_ll_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyud.nac.ll.s1"] + fn hexagon_M2_mpyud_nac_ll_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.mpyui"] + fn hexagon_M2_mpyui(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.nacci"] + fn hexagon_M2_nacci(_: i32, _: i32, _: i32) -> i32; + 
#[link_name = "llvm.hexagon.M2.naccii"] + fn hexagon_M2_naccii(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.subacc"] + fn hexagon_M2_subacc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.vabsdiffh"] + fn hexagon_M2_vabsdiffh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vabsdiffw"] + fn hexagon_M2_vabsdiffw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vcmac.s0.sat.i"] + fn hexagon_M2_vcmac_s0_sat_i(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vcmac.s0.sat.r"] + fn hexagon_M2_vcmac_s0_sat_r(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vcmpy.s0.sat.i"] + fn hexagon_M2_vcmpy_s0_sat_i(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vcmpy.s0.sat.r"] + fn hexagon_M2_vcmpy_s0_sat_r(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vcmpy.s1.sat.i"] + fn hexagon_M2_vcmpy_s1_sat_i(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vcmpy.s1.sat.r"] + fn hexagon_M2_vcmpy_s1_sat_r(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vdmacs.s0"] + fn hexagon_M2_vdmacs_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vdmacs.s1"] + fn hexagon_M2_vdmacs_s1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vdmpyrs.s0"] + fn hexagon_M2_vdmpyrs_s0(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M2.vdmpyrs.s1"] + fn hexagon_M2_vdmpyrs_s1(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M2.vdmpys.s0"] + fn hexagon_M2_vdmpys_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vdmpys.s1"] + fn hexagon_M2_vdmpys_s1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vmac2"] + fn hexagon_M2_vmac2(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vmac2es"] + fn hexagon_M2_vmac2es(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vmac2es.s0"] + fn hexagon_M2_vmac2es_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vmac2es.s1"] + fn hexagon_M2_vmac2es_s1(_: i64, 
_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vmac2s.s0"] + fn hexagon_M2_vmac2s_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vmac2s.s1"] + fn hexagon_M2_vmac2s_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vmac2su.s0"] + fn hexagon_M2_vmac2su_s0(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vmac2su.s1"] + fn hexagon_M2_vmac2su_s1(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vmpy2es.s0"] + fn hexagon_M2_vmpy2es_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vmpy2es.s1"] + fn hexagon_M2_vmpy2es_s1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vmpy2s.s0"] + fn hexagon_M2_vmpy2s_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vmpy2s.s0pack"] + fn hexagon_M2_vmpy2s_s0pack(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.vmpy2s.s1"] + fn hexagon_M2_vmpy2s_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vmpy2s.s1pack"] + fn hexagon_M2_vmpy2s_s1pack(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.vmpy2su.s0"] + fn hexagon_M2_vmpy2su_s0(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vmpy2su.s1"] + fn hexagon_M2_vmpy2su_s1(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vraddh"] + fn hexagon_M2_vraddh(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M2.vradduh"] + fn hexagon_M2_vradduh(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M2.vrcmaci.s0"] + fn hexagon_M2_vrcmaci_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmaci.s0c"] + fn hexagon_M2_vrcmaci_s0c(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmacr.s0"] + fn hexagon_M2_vrcmacr_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmacr.s0c"] + fn hexagon_M2_vrcmacr_s0c(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmpyi.s0"] + fn hexagon_M2_vrcmpyi_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmpyi.s0c"] + fn 
hexagon_M2_vrcmpyi_s0c(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmpyr.s0"] + fn hexagon_M2_vrcmpyr_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmpyr.s0c"] + fn hexagon_M2_vrcmpyr_s0c(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmpys.acc.s1"] + fn hexagon_M2_vrcmpys_acc_s1(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmpys.s1"] + fn hexagon_M2_vrcmpys_s1(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.M2.vrcmpys.s1rp"] + fn hexagon_M2_vrcmpys_s1rp(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.M2.vrmac.s0"] + fn hexagon_M2_vrmac_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.vrmpy.s0"] + fn hexagon_M2_vrmpy_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M2.xor.xacc"] + fn hexagon_M2_xor_xacc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.and.and"] + fn hexagon_M4_and_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.and.andn"] + fn hexagon_M4_and_andn(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.and.or"] + fn hexagon_M4_and_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.and.xor"] + fn hexagon_M4_and_xor(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.cmpyi.wh"] + fn hexagon_M4_cmpyi_wh(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.cmpyi.whc"] + fn hexagon_M4_cmpyi_whc(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.cmpyr.wh"] + fn hexagon_M4_cmpyr_wh(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.cmpyr.whc"] + fn hexagon_M4_cmpyr_whc(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.mac.up.s1.sat"] + fn hexagon_M4_mac_up_s1_sat(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.mpyri.addi"] + fn hexagon_M4_mpyri_addi(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.mpyri.addr"] + fn hexagon_M4_mpyri_addr(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.mpyri.addr.u2"] + fn 
hexagon_M4_mpyri_addr_u2(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.mpyrr.addi"] + fn hexagon_M4_mpyrr_addi(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.mpyrr.addr"] + fn hexagon_M4_mpyrr_addr(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.nac.up.s1.sat"] + fn hexagon_M4_nac_up_s1_sat(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.or.and"] + fn hexagon_M4_or_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.or.andn"] + fn hexagon_M4_or_andn(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.or.or"] + fn hexagon_M4_or_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.or.xor"] + fn hexagon_M4_or_xor(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.pmpyw"] + fn hexagon_M4_pmpyw(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M4.pmpyw.acc"] + fn hexagon_M4_pmpyw_acc(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M4.vpmpyh"] + fn hexagon_M4_vpmpyh(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M4.vpmpyh.acc"] + fn hexagon_M4_vpmpyh_acc(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M4.vrmpyeh.acc.s0"] + fn hexagon_M4_vrmpyeh_acc_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M4.vrmpyeh.acc.s1"] + fn hexagon_M4_vrmpyeh_acc_s1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M4.vrmpyeh.s0"] + fn hexagon_M4_vrmpyeh_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M4.vrmpyeh.s1"] + fn hexagon_M4_vrmpyeh_s1(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M4.vrmpyoh.acc.s0"] + fn hexagon_M4_vrmpyoh_acc_s0(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M4.vrmpyoh.acc.s1"] + fn hexagon_M4_vrmpyoh_acc_s1(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M4.vrmpyoh.s0"] + fn hexagon_M4_vrmpyoh_s0(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M4.vrmpyoh.s1"] + fn hexagon_M4_vrmpyoh_s1(_: i64, _: i64) -> i64; + 
#[link_name = "llvm.hexagon.M4.xor.and"] + fn hexagon_M4_xor_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.xor.andn"] + fn hexagon_M4_xor_andn(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.xor.or"] + fn hexagon_M4_xor_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M4.xor.xacc"] + fn hexagon_M4_xor_xacc(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M5.vdmacbsu"] + fn hexagon_M5_vdmacbsu(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M5.vdmpybsu"] + fn hexagon_M5_vdmpybsu(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M5.vmacbsu"] + fn hexagon_M5_vmacbsu(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M5.vmacbuu"] + fn hexagon_M5_vmacbuu(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M5.vmpybsu"] + fn hexagon_M5_vmpybsu(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M5.vmpybuu"] + fn hexagon_M5_vmpybuu(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.M5.vrmacbsu"] + fn hexagon_M5_vrmacbsu(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M5.vrmacbuu"] + fn hexagon_M5_vrmacbuu(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M5.vrmpybsu"] + fn hexagon_M5_vrmpybsu(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M5.vrmpybuu"] + fn hexagon_M5_vrmpybuu(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.addasl.rrri"] + fn hexagon_S2_addasl_rrri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.i.p"] + fn hexagon_S2_asl_i_p(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.i.p.acc"] + fn hexagon_S2_asl_i_p_acc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.i.p.and"] + fn hexagon_S2_asl_i_p_and(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.i.p.nac"] + fn hexagon_S2_asl_i_p_nac(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.i.p.or"] + fn hexagon_S2_asl_i_p_or(_: i64, _: i64, _: i32) -> i64; + #[link_name = 
"llvm.hexagon.S2.asl.i.p.xacc"] + fn hexagon_S2_asl_i_p_xacc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.i.r"] + fn hexagon_S2_asl_i_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.i.r.acc"] + fn hexagon_S2_asl_i_r_acc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.i.r.and"] + fn hexagon_S2_asl_i_r_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.i.r.nac"] + fn hexagon_S2_asl_i_r_nac(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.i.r.or"] + fn hexagon_S2_asl_i_r_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.i.r.sat"] + fn hexagon_S2_asl_i_r_sat(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.i.r.xacc"] + fn hexagon_S2_asl_i_r_xacc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.i.vh"] + fn hexagon_S2_asl_i_vh(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.i.vw"] + fn hexagon_S2_asl_i_vw(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.r.p"] + fn hexagon_S2_asl_r_p(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.r.p.acc"] + fn hexagon_S2_asl_r_p_acc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.r.p.and"] + fn hexagon_S2_asl_r_p_and(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.r.p.nac"] + fn hexagon_S2_asl_r_p_nac(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.r.p.or"] + fn hexagon_S2_asl_r_p_or(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.r.p.xor"] + fn hexagon_S2_asl_r_p_xor(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.r.r"] + fn hexagon_S2_asl_r_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.r.r.acc"] + fn hexagon_S2_asl_r_r_acc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.r.r.and"] + fn hexagon_S2_asl_r_r_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.r.r.nac"] + fn 
hexagon_S2_asl_r_r_nac(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.r.r.or"] + fn hexagon_S2_asl_r_r_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.r.r.sat"] + fn hexagon_S2_asl_r_r_sat(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asl.r.vh"] + fn hexagon_S2_asl_r_vh(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asl.r.vw"] + fn hexagon_S2_asl_r_vw(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.i.p"] + fn hexagon_S2_asr_i_p(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.i.p.acc"] + fn hexagon_S2_asr_i_p_acc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.i.p.and"] + fn hexagon_S2_asr_i_p_and(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.i.p.nac"] + fn hexagon_S2_asr_i_p_nac(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.i.p.or"] + fn hexagon_S2_asr_i_p_or(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.i.p.rnd"] + fn hexagon_S2_asr_i_p_rnd(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.i.p.rnd.goodsyntax"] + fn hexagon_S2_asr_i_p_rnd_goodsyntax(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.i.r"] + fn hexagon_S2_asr_i_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.i.r.acc"] + fn hexagon_S2_asr_i_r_acc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.i.r.and"] + fn hexagon_S2_asr_i_r_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.i.r.nac"] + fn hexagon_S2_asr_i_r_nac(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.i.r.or"] + fn hexagon_S2_asr_i_r_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.i.r.rnd"] + fn hexagon_S2_asr_i_r_rnd(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.i.r.rnd.goodsyntax"] + fn hexagon_S2_asr_i_r_rnd_goodsyntax(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.i.svw.trun"] + fn 
hexagon_S2_asr_i_svw_trun(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.i.vh"] + fn hexagon_S2_asr_i_vh(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.i.vw"] + fn hexagon_S2_asr_i_vw(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.r.p"] + fn hexagon_S2_asr_r_p(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.r.p.acc"] + fn hexagon_S2_asr_r_p_acc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.r.p.and"] + fn hexagon_S2_asr_r_p_and(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.r.p.nac"] + fn hexagon_S2_asr_r_p_nac(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.r.p.or"] + fn hexagon_S2_asr_r_p_or(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.r.p.xor"] + fn hexagon_S2_asr_r_p_xor(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.r.r"] + fn hexagon_S2_asr_r_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.r.r.acc"] + fn hexagon_S2_asr_r_r_acc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.r.r.and"] + fn hexagon_S2_asr_r_r_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.r.r.nac"] + fn hexagon_S2_asr_r_r_nac(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.r.r.or"] + fn hexagon_S2_asr_r_r_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.r.r.sat"] + fn hexagon_S2_asr_r_r_sat(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.r.svw.trun"] + fn hexagon_S2_asr_r_svw_trun(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.asr.r.vh"] + fn hexagon_S2_asr_r_vh(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.asr.r.vw"] + fn hexagon_S2_asr_r_vw(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.brev"] + fn hexagon_S2_brev(_: i32) -> i32; + #[link_name = "llvm.hexagon.S2.brevp"] + fn hexagon_S2_brevp(_: i64) -> i64; + #[link_name = "llvm.hexagon.S2.cl0"] + fn hexagon_S2_cl0(_: i32) -> i32; + 
#[link_name = "llvm.hexagon.S2.cl0p"] + fn hexagon_S2_cl0p(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.cl1"] + fn hexagon_S2_cl1(_: i32) -> i32; + #[link_name = "llvm.hexagon.S2.cl1p"] + fn hexagon_S2_cl1p(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.clb"] + fn hexagon_S2_clb(_: i32) -> i32; + #[link_name = "llvm.hexagon.S2.clbnorm"] + fn hexagon_S2_clbnorm(_: i32) -> i32; + #[link_name = "llvm.hexagon.S2.clbp"] + fn hexagon_S2_clbp(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.clrbit.i"] + fn hexagon_S2_clrbit_i(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.clrbit.r"] + fn hexagon_S2_clrbit_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.ct0"] + fn hexagon_S2_ct0(_: i32) -> i32; + #[link_name = "llvm.hexagon.S2.ct0p"] + fn hexagon_S2_ct0p(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.ct1"] + fn hexagon_S2_ct1(_: i32) -> i32; + #[link_name = "llvm.hexagon.S2.ct1p"] + fn hexagon_S2_ct1p(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.deinterleave"] + fn hexagon_S2_deinterleave(_: i64) -> i64; + #[link_name = "llvm.hexagon.S2.extractu"] + fn hexagon_S2_extractu(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.extractu.rp"] + fn hexagon_S2_extractu_rp(_: i32, _: i64) -> i32; + #[link_name = "llvm.hexagon.S2.extractup"] + fn hexagon_S2_extractup(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.extractup.rp"] + fn hexagon_S2_extractup_rp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.insert"] + fn hexagon_S2_insert(_: i32, _: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.insert.rp"] + fn hexagon_S2_insert_rp(_: i32, _: i32, _: i64) -> i32; + #[link_name = "llvm.hexagon.S2.insertp"] + fn hexagon_S2_insertp(_: i64, _: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.insertp.rp"] + fn hexagon_S2_insertp_rp(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.interleave"] + fn hexagon_S2_interleave(_: i64) -> i64; + #[link_name = "llvm.hexagon.S2.lfsp"] + 
fn hexagon_S2_lfsp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.lsl.r.p"] + fn hexagon_S2_lsl_r_p(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsl.r.p.acc"] + fn hexagon_S2_lsl_r_p_acc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsl.r.p.and"] + fn hexagon_S2_lsl_r_p_and(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsl.r.p.nac"] + fn hexagon_S2_lsl_r_p_nac(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsl.r.p.or"] + fn hexagon_S2_lsl_r_p_or(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsl.r.p.xor"] + fn hexagon_S2_lsl_r_p_xor(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsl.r.r"] + fn hexagon_S2_lsl_r_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsl.r.r.acc"] + fn hexagon_S2_lsl_r_r_acc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsl.r.r.and"] + fn hexagon_S2_lsl_r_r_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsl.r.r.nac"] + fn hexagon_S2_lsl_r_r_nac(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsl.r.r.or"] + fn hexagon_S2_lsl_r_r_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsl.r.vh"] + fn hexagon_S2_lsl_r_vh(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsl.r.vw"] + fn hexagon_S2_lsl_r_vw(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.i.p"] + fn hexagon_S2_lsr_i_p(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.i.p.acc"] + fn hexagon_S2_lsr_i_p_acc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.i.p.and"] + fn hexagon_S2_lsr_i_p_and(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.i.p.nac"] + fn hexagon_S2_lsr_i_p_nac(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.i.p.or"] + fn hexagon_S2_lsr_i_p_or(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.i.p.xacc"] + fn hexagon_S2_lsr_i_p_xacc(_: i64, _: i64, _: i32) -> i64; + 
#[link_name = "llvm.hexagon.S2.lsr.i.r"] + fn hexagon_S2_lsr_i_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.i.r.acc"] + fn hexagon_S2_lsr_i_r_acc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.i.r.and"] + fn hexagon_S2_lsr_i_r_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.i.r.nac"] + fn hexagon_S2_lsr_i_r_nac(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.i.r.or"] + fn hexagon_S2_lsr_i_r_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.i.r.xacc"] + fn hexagon_S2_lsr_i_r_xacc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.i.vh"] + fn hexagon_S2_lsr_i_vh(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.i.vw"] + fn hexagon_S2_lsr_i_vw(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.r.p"] + fn hexagon_S2_lsr_r_p(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.r.p.acc"] + fn hexagon_S2_lsr_r_p_acc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.r.p.and"] + fn hexagon_S2_lsr_r_p_and(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.r.p.nac"] + fn hexagon_S2_lsr_r_p_nac(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.r.p.or"] + fn hexagon_S2_lsr_r_p_or(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.r.p.xor"] + fn hexagon_S2_lsr_r_p_xor(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.r.r"] + fn hexagon_S2_lsr_r_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.r.r.acc"] + fn hexagon_S2_lsr_r_r_acc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.r.r.and"] + fn hexagon_S2_lsr_r_r_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.r.r.nac"] + fn hexagon_S2_lsr_r_r_nac(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.r.r.or"] + fn hexagon_S2_lsr_r_r_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.lsr.r.vh"] + fn 
hexagon_S2_lsr_r_vh(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.lsr.r.vw"] + fn hexagon_S2_lsr_r_vw(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.packhl"] + fn hexagon_S2_packhl(_: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.parityp"] + fn hexagon_S2_parityp(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.S2.setbit.i"] + fn hexagon_S2_setbit_i(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.setbit.r"] + fn hexagon_S2_setbit_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.shuffeb"] + fn hexagon_S2_shuffeb(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.shuffeh"] + fn hexagon_S2_shuffeh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.shuffob"] + fn hexagon_S2_shuffob(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.shuffoh"] + fn hexagon_S2_shuffoh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.svsathb"] + fn hexagon_S2_svsathb(_: i32) -> i32; + #[link_name = "llvm.hexagon.S2.svsathub"] + fn hexagon_S2_svsathub(_: i32) -> i32; + #[link_name = "llvm.hexagon.S2.tableidxb.goodsyntax"] + fn hexagon_S2_tableidxb_goodsyntax(_: i32, _: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.tableidxd.goodsyntax"] + fn hexagon_S2_tableidxd_goodsyntax(_: i32, _: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.tableidxh.goodsyntax"] + fn hexagon_S2_tableidxh_goodsyntax(_: i32, _: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.tableidxw.goodsyntax"] + fn hexagon_S2_tableidxw_goodsyntax(_: i32, _: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.togglebit.i"] + fn hexagon_S2_togglebit_i(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.togglebit.r"] + fn hexagon_S2_togglebit_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.tstbit.i"] + fn hexagon_S2_tstbit_i(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.tstbit.r"] + fn hexagon_S2_tstbit_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.valignib"] + fn 
hexagon_S2_valignib(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.valignrb"] + fn hexagon_S2_valignrb(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vcnegh"] + fn hexagon_S2_vcnegh(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vcrotate"] + fn hexagon_S2_vcrotate(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vrcnegh"] + fn hexagon_S2_vrcnegh(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vrndpackwh"] + fn hexagon_S2_vrndpackwh(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.vrndpackwhs"] + fn hexagon_S2_vrndpackwhs(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.vsathb"] + fn hexagon_S2_vsathb(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.vsathb.nopack"] + fn hexagon_S2_vsathb_nopack(_: i64) -> i64; + #[link_name = "llvm.hexagon.S2.vsathub"] + fn hexagon_S2_vsathub(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.vsathub.nopack"] + fn hexagon_S2_vsathub_nopack(_: i64) -> i64; + #[link_name = "llvm.hexagon.S2.vsatwh"] + fn hexagon_S2_vsatwh(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.vsatwh.nopack"] + fn hexagon_S2_vsatwh_nopack(_: i64) -> i64; + #[link_name = "llvm.hexagon.S2.vsatwuh"] + fn hexagon_S2_vsatwuh(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.vsatwuh.nopack"] + fn hexagon_S2_vsatwuh_nopack(_: i64) -> i64; + #[link_name = "llvm.hexagon.S2.vsplatrb"] + fn hexagon_S2_vsplatrb(_: i32) -> i32; + #[link_name = "llvm.hexagon.S2.vsplatrh"] + fn hexagon_S2_vsplatrh(_: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vspliceib"] + fn hexagon_S2_vspliceib(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vsplicerb"] + fn hexagon_S2_vsplicerb(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vsxtbh"] + fn hexagon_S2_vsxtbh(_: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vsxthw"] + fn hexagon_S2_vsxthw(_: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vtrunehb"] + fn hexagon_S2_vtrunehb(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.vtrunewh"] + fn 
hexagon_S2_vtrunewh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.vtrunohb"] + fn hexagon_S2_vtrunohb(_: i64) -> i32; + #[link_name = "llvm.hexagon.S2.vtrunowh"] + fn hexagon_S2_vtrunowh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S2.vzxtbh"] + fn hexagon_S2_vzxtbh(_: i32) -> i64; + #[link_name = "llvm.hexagon.S2.vzxthw"] + fn hexagon_S2_vzxthw(_: i32) -> i64; + #[link_name = "llvm.hexagon.S4.addaddi"] + fn hexagon_S4_addaddi(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.addi.asl.ri"] + fn hexagon_S4_addi_asl_ri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.addi.lsr.ri"] + fn hexagon_S4_addi_lsr_ri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.andi.asl.ri"] + fn hexagon_S4_andi_asl_ri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.andi.lsr.ri"] + fn hexagon_S4_andi_lsr_ri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.clbaddi"] + fn hexagon_S4_clbaddi(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.clbpaddi"] + fn hexagon_S4_clbpaddi(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.clbpnorm"] + fn hexagon_S4_clbpnorm(_: i64) -> i32; + #[link_name = "llvm.hexagon.S4.extract"] + fn hexagon_S4_extract(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.extract.rp"] + fn hexagon_S4_extract_rp(_: i32, _: i64) -> i32; + #[link_name = "llvm.hexagon.S4.extractp"] + fn hexagon_S4_extractp(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.S4.extractp.rp"] + fn hexagon_S4_extractp_rp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S4.lsli"] + fn hexagon_S4_lsli(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.ntstbit.i"] + fn hexagon_S4_ntstbit_i(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.ntstbit.r"] + fn hexagon_S4_ntstbit_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.or.andi"] + fn hexagon_S4_or_andi(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.or.andix"] + fn 
hexagon_S4_or_andix(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.or.ori"] + fn hexagon_S4_or_ori(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.ori.asl.ri"] + fn hexagon_S4_ori_asl_ri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.ori.lsr.ri"] + fn hexagon_S4_ori_lsr_ri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.parity"] + fn hexagon_S4_parity(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.subaddi"] + fn hexagon_S4_subaddi(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.subi.asl.ri"] + fn hexagon_S4_subi_asl_ri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.subi.lsr.ri"] + fn hexagon_S4_subi_lsr_ri(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S4.vrcrotate"] + fn hexagon_S4_vrcrotate(_: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.S4.vrcrotate.acc"] + fn hexagon_S4_vrcrotate_acc(_: i64, _: i64, _: i32, _: i32) -> i64; + #[link_name = "llvm.hexagon.S4.vxaddsubh"] + fn hexagon_S4_vxaddsubh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S4.vxaddsubhr"] + fn hexagon_S4_vxaddsubhr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S4.vxaddsubw"] + fn hexagon_S4_vxaddsubw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S4.vxsubaddh"] + fn hexagon_S4_vxsubaddh(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S4.vxsubaddhr"] + fn hexagon_S4_vxsubaddhr(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S4.vxsubaddw"] + fn hexagon_S4_vxsubaddw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S5.asrhub.rnd.sat.goodsyntax"] + fn hexagon_S5_asrhub_rnd_sat_goodsyntax(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.S5.asrhub.sat"] + fn hexagon_S5_asrhub_sat(_: i64, _: i32) -> i32; + #[link_name = "llvm.hexagon.S5.popcountp"] + fn hexagon_S5_popcountp(_: i64) -> i32; + #[link_name = "llvm.hexagon.S5.vasrhrnd.goodsyntax"] + fn hexagon_S5_vasrhrnd_goodsyntax(_: i64, _: i32) -> i64; + #[link_name = 
"llvm.hexagon.Y2.dccleana"] + fn hexagon_Y2_dccleana(_: i32); + #[link_name = "llvm.hexagon.Y2.dccleaninva"] + fn hexagon_Y2_dccleaninva(_: i32); + #[link_name = "llvm.hexagon.Y2.dcfetch"] + fn hexagon_Y2_dcfetch(_: i32); + #[link_name = "llvm.hexagon.Y2.dcinva"] + fn hexagon_Y2_dcinva(_: i32); + #[link_name = "llvm.hexagon.Y2.dczeroa"] + fn hexagon_Y2_dczeroa(_: i32); + #[link_name = "llvm.hexagon.Y4.l2fetch"] + fn hexagon_Y4_l2fetch(_: i32, _: i32); + #[link_name = "llvm.hexagon.Y5.l2fetch"] + fn hexagon_Y5_l2fetch(_: i32, _: i64); + #[link_name = "llvm.hexagon.S6.rol.i.p"] + fn hexagon_S6_rol_i_p(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S6.rol.i.p.acc"] + fn hexagon_S6_rol_i_p_acc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S6.rol.i.p.and"] + fn hexagon_S6_rol_i_p_and(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S6.rol.i.p.nac"] + fn hexagon_S6_rol_i_p_nac(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S6.rol.i.p.or"] + fn hexagon_S6_rol_i_p_or(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S6.rol.i.p.xacc"] + fn hexagon_S6_rol_i_p_xacc(_: i64, _: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.S6.rol.i.r"] + fn hexagon_S6_rol_i_r(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S6.rol.i.r.acc"] + fn hexagon_S6_rol_i_r_acc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S6.rol.i.r.and"] + fn hexagon_S6_rol_i_r_and(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S6.rol.i.r.nac"] + fn hexagon_S6_rol_i_r_nac(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S6.rol.i.r.or"] + fn hexagon_S6_rol_i_r_or(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S6.rol.i.r.xacc"] + fn hexagon_S6_rol_i_r_xacc(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.M6.vabsdiffb"] + fn hexagon_M6_vabsdiffb(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M6.vabsdiffub"] + fn hexagon_M6_vabsdiffub(_: i64, _: i64) -> i64; + #[link_name = 
"llvm.hexagon.S6.vsplatrbp"] + fn hexagon_S6_vsplatrbp(_: i32) -> i64; + #[link_name = "llvm.hexagon.S6.vtrunehb.ppp"] + fn hexagon_S6_vtrunehb_ppp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.S6.vtrunohb.ppp"] + fn hexagon_S6_vtrunohb_ppp(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.A6.vcmpbeq.notany"] + fn hexagon_A6_vcmpbeq_notany(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.F2.dfadd"] + fn hexagon_F2_dfadd(_: f64, _: f64) -> f64; + #[link_name = "llvm.hexagon.F2.dfsub"] + fn hexagon_F2_dfsub(_: f64, _: f64) -> f64; + #[link_name = "llvm.hexagon.M2.mnaci"] + fn hexagon_M2_mnaci(_: i32, _: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.S2.mask"] + fn hexagon_S2_mask(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A7.clip"] + fn hexagon_A7_clip(_: i32, _: i32) -> i32; + #[link_name = "llvm.hexagon.A7.croundd.ri"] + fn hexagon_A7_croundd_ri(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A7.croundd.rr"] + fn hexagon_A7_croundd_rr(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.A7.vclip"] + fn hexagon_A7_vclip(_: i64, _: i32) -> i64; + #[link_name = "llvm.hexagon.F2.dfmax"] + fn hexagon_F2_dfmax(_: f64, _: f64) -> f64; + #[link_name = "llvm.hexagon.F2.dfmin"] + fn hexagon_F2_dfmin(_: f64, _: f64) -> f64; + #[link_name = "llvm.hexagon.F2.dfmpyfix"] + fn hexagon_F2_dfmpyfix(_: f64, _: f64) -> f64; + #[link_name = "llvm.hexagon.F2.dfmpyhh"] + fn hexagon_F2_dfmpyhh(_: f64, _: f64, _: f64) -> f64; + #[link_name = "llvm.hexagon.F2.dfmpylh"] + fn hexagon_F2_dfmpylh(_: f64, _: f64, _: f64) -> f64; + #[link_name = "llvm.hexagon.F2.dfmpyll"] + fn hexagon_F2_dfmpyll(_: f64, _: f64) -> f64; + #[link_name = "llvm.hexagon.M7.dcmpyiw"] + fn hexagon_M7_dcmpyiw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.dcmpyiw.acc"] + fn hexagon_M7_dcmpyiw_acc(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.dcmpyiwc"] + fn hexagon_M7_dcmpyiwc(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.dcmpyiwc.acc"] + fn 
hexagon_M7_dcmpyiwc_acc(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.dcmpyrw"] + fn hexagon_M7_dcmpyrw(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.dcmpyrw.acc"] + fn hexagon_M7_dcmpyrw_acc(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.dcmpyrwc"] + fn hexagon_M7_dcmpyrwc(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.dcmpyrwc.acc"] + fn hexagon_M7_dcmpyrwc_acc(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.vdmpy"] + fn hexagon_M7_vdmpy(_: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.vdmpy.acc"] + fn hexagon_M7_vdmpy_acc(_: i64, _: i64, _: i64) -> i64; + #[link_name = "llvm.hexagon.M7.wcmpyiw"] + fn hexagon_M7_wcmpyiw(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M7.wcmpyiw.rnd"] + fn hexagon_M7_wcmpyiw_rnd(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M7.wcmpyiwc"] + fn hexagon_M7_wcmpyiwc(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M7.wcmpyiwc.rnd"] + fn hexagon_M7_wcmpyiwc_rnd(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M7.wcmpyrw"] + fn hexagon_M7_wcmpyrw(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M7.wcmpyrw.rnd"] + fn hexagon_M7_wcmpyrw_rnd(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M7.wcmpyrwc"] + fn hexagon_M7_wcmpyrwc(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.M7.wcmpyrwc.rnd"] + fn hexagon_M7_wcmpyrwc_rnd(_: i64, _: i64) -> i32; + #[link_name = "llvm.hexagon.Y6.dmlink"] + fn hexagon_Y6_dmlink(_: i32, _: i32); + #[link_name = "llvm.hexagon.Y6.dmpause"] + fn hexagon_Y6_dmpause() -> i32; + #[link_name = "llvm.hexagon.Y6.dmpoll"] + fn hexagon_Y6_dmpoll() -> i32; + #[link_name = "llvm.hexagon.Y6.dmresume"] + fn hexagon_Y6_dmresume(_: i32); + #[link_name = "llvm.hexagon.Y6.dmstart"] + fn hexagon_Y6_dmstart(_: i32); + #[link_name = "llvm.hexagon.Y6.dmwait"] + fn hexagon_Y6_dmwait() -> i32; +} + +/// `Rd32=abs(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(abs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_abs_R(rs: i32) -> i32 { + hexagon_A2_abs(rs) +} + +/// `Rdd32=abs(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(abs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_abs_P(rss: i64) -> i64 { + hexagon_A2_absp(rss) +} + +/// `Rd32=abs(Rs32):sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(abs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_abs_R_sat(rs: i32) -> i32 { + hexagon_A2_abssat(rs) +} + +/// `Rd32=add(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RR(rs: i32, rt: i32) -> i32 { + hexagon_A2_add(rs, rt) +} + +/// `Rd32=add(Rt32.h,Rs32.h):<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RhRh_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_h16_hh(rt, rs) +} + +/// `Rd32=add(Rt32.h,Rs32.l):<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RhRl_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_h16_hl(rt, rs) +} + +/// `Rd32=add(Rt32.l,Rs32.h):<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RlRh_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_h16_lh(rt, rs) +} + +/// 
`Rd32=add(Rt32.l,Rs32.l):<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RlRl_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_h16_ll(rt, rs) +} + +/// `Rd32=add(Rt32.h,Rs32.h):sat:<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RhRh_sat_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_h16_sat_hh(rt, rs) +} + +/// `Rd32=add(Rt32.h,Rs32.l):sat:<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RhRl_sat_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_h16_sat_hl(rt, rs) +} + +/// `Rd32=add(Rt32.l,Rs32.h):sat:<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RlRh_sat_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_h16_sat_lh(rt, rs) +} + +/// `Rd32=add(Rt32.l,Rs32.l):sat:<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RlRl_sat_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_h16_sat_ll(rt, rs) +} + +/// `Rd32=add(Rt32.l,Rs32.h)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RlRh(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_l16_hl(rt, rs) +} + +/// `Rd32=add(Rt32.l,Rs32.l)` +/// +/// Instruction Type: ALU64 +/// Execution 
Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RlRl(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_l16_ll(rt, rs) +} + +/// `Rd32=add(Rt32.l,Rs32.h):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RlRh_sat(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_l16_sat_hl(rt, rs) +} + +/// `Rd32=add(Rt32.l,Rs32.l):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RlRl_sat(rt: i32, rs: i32) -> i32 { + hexagon_A2_addh_l16_sat_ll(rt, rs) +} + +/// `Rd32=add(Rs32,#s16)` +/// +/// Instruction Type: ALU32_ADDI +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(add, IS16 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS16, 16); + hexagon_A2_addi(rs, IS16) +} + +/// `Rdd32=add(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_add_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_addp(rss, rtt) +} + +/// `Rdd32=add(Rss32,Rtt32):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_add_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_A2_addpsat(rss, rtt) +} + +/// `Rd32=add(Rs32,Rt32):sat` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_RR_sat(rs: i32, rt: i32) -> i32 { + hexagon_A2_addsat(rs, rt) +} + +/// `Rdd32=add(Rs32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_add_RP(rs: i32, rtt: i64) -> i64 { + hexagon_A2_addsp(rs, rtt) +} + +/// `Rd32=and(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_and_RR(rs: i32, rt: i32) -> i32 { + hexagon_A2_and(rs, rt) +} + +/// `Rd32=and(Rs32,#s10)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(and, IS10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_and_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_A2_andir(rs, IS10) +} + +/// `Rdd32=and(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_and_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_andp(rss, rtt) +} + +/// `Rd32=aslh(Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(aslh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_aslh_R(rs: i32) -> i32 { + hexagon_A2_aslh(rs) +} + +/// `Rd32=asrh(Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(asrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asrh_R(rs: i32) -> i32 { + hexagon_A2_asrh(rs) +} + +/// 
`Rd32=combine(Rt32.h,Rs32.h)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(combine))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_combine_RhRh(rt: i32, rs: i32) -> i32 { + hexagon_A2_combine_hh(rt, rs) +} + +/// `Rd32=combine(Rt32.h,Rs32.l)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(combine))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_combine_RhRl(rt: i32, rs: i32) -> i32 { + hexagon_A2_combine_hl(rt, rs) +} + +/// `Rd32=combine(Rt32.l,Rs32.h)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(combine))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_combine_RlRh(rt: i32, rs: i32) -> i32 { + hexagon_A2_combine_lh(rt, rs) +} + +/// `Rd32=combine(Rt32.l,Rs32.l)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(combine))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_combine_RlRl(rt: i32, rs: i32) -> i32 { + hexagon_A2_combine_ll(rt, rs) +} + +/// `Rdd32=combine(#s8,#S8)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(0, 1)] +#[cfg_attr(test, assert_instr(combine, IS8 = 0, IS8_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_combine_II() -> i64 { + static_assert_simm_bits!(IS8, 8); + static_assert_simm_bits!(IS8_2, 8); + hexagon_A2_combineii(IS8, IS8_2) +} + +/// `Rdd32=combine(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(combine))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_combine_RR(rs: i32, rt: i32) -> i64 { + 
hexagon_A2_combinew(rs, rt) +} + +/// `Rd32=max(Rs32,Rt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(max))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_max_RR(rs: i32, rt: i32) -> i32 { + hexagon_A2_max(rs, rt) +} + +/// `Rdd32=max(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(max))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_max_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_maxp(rss, rtt) +} + +/// `Rd32=maxu(Rs32,Rt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(maxu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_maxu_RR(rs: i32, rt: i32) -> i32 { + hexagon_A2_maxu(rs, rt) +} + +/// `Rdd32=maxu(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(maxu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_maxu_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_maxup(rss, rtt) +} + +/// `Rd32=min(Rt32,Rs32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(min))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_min_RR(rt: i32, rs: i32) -> i32 { + hexagon_A2_min(rt, rs) +} + +/// `Rdd32=min(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(min))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_min_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_minp(rtt, rss) +} + +/// `Rd32=minu(Rt32,Rs32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(minu))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_minu_RR(rt: i32, rs: i32) -> i32 { + hexagon_A2_minu(rt, rs) +} + +/// `Rdd32=minu(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(minu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_minu_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_minup(rtt, rss) +} + +/// `Rd32=neg(Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(neg))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_neg_R(rs: i32) -> i32 { + hexagon_A2_neg(rs) +} + +/// `Rdd32=neg(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(neg))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_neg_P(rss: i64) -> i64 { + hexagon_A2_negp(rss) +} + +/// `Rd32=neg(Rs32):sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(neg))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_neg_R_sat(rs: i32) -> i32 { + hexagon_A2_negsat(rs) +} + +/// `Rd32=not(Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(not))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_not_R(rs: i32) -> i32 { + hexagon_A2_not(rs) +} + +/// `Rdd32=not(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(not))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_not_P(rss: i64) -> i64 { + hexagon_A2_notp(rss) +} + +/// `Rd32=or(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_or_RR(rs: i32, rt: i32) -> i32 { + hexagon_A2_or(rs, rt) +} + +/// `Rd32=or(Rs32,#s10)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(or, IS10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_or_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_A2_orir(rs, IS10) +} + +/// `Rdd32=or(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_or_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_orp(rss, rtt) +} + +/// `Rd32=round(Rss32):sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(round))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_round_P_sat(rss: i64) -> i32 { + hexagon_A2_roundsat(rss) +} + +/// `Rd32=sat(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sat_P(rss: i64) -> i32 { + hexagon_A2_sat(rss) +} + +/// `Rd32=satb(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(satb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_satb_R(rs: i32) -> i32 { + hexagon_A2_satb(rs) +} + +/// `Rd32=sath(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sath))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sath_R(rs: i32) -> i32 { + hexagon_A2_sath(rs) +} + +/// `Rd32=satub(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(test, assert_instr(satub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_satub_R(rs: i32) -> i32 { + hexagon_A2_satub(rs) +} + +/// `Rd32=satuh(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(satuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_satuh_R(rs: i32) -> i32 { + hexagon_A2_satuh(rs) +} + +/// `Rd32=sub(Rt32,Rs32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RR(rt: i32, rs: i32) -> i32 { + hexagon_A2_sub(rt, rs) +} + +/// `Rd32=sub(Rt32.h,Rs32.h):<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RhRh_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_h16_hh(rt, rs) +} + +/// `Rd32=sub(Rt32.h,Rs32.l):<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RhRl_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_h16_hl(rt, rs) +} + +/// `Rd32=sub(Rt32.l,Rs32.h):<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RlRh_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_h16_lh(rt, rs) +} + +/// `Rd32=sub(Rt32.l,Rs32.l):<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RlRl_s16(rt: i32, rs: i32) -> i32 { + 
hexagon_A2_subh_h16_ll(rt, rs) +} + +/// `Rd32=sub(Rt32.h,Rs32.h):sat:<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RhRh_sat_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_h16_sat_hh(rt, rs) +} + +/// `Rd32=sub(Rt32.h,Rs32.l):sat:<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RhRl_sat_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_h16_sat_hl(rt, rs) +} + +/// `Rd32=sub(Rt32.l,Rs32.h):sat:<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RlRh_sat_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_h16_sat_lh(rt, rs) +} + +/// `Rd32=sub(Rt32.l,Rs32.l):sat:<<16` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RlRl_sat_s16(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_h16_sat_ll(rt, rs) +} + +/// `Rd32=sub(Rt32.l,Rs32.h)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RlRh(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_l16_hl(rt, rs) +} + +/// `Rd32=sub(Rt32.l,Rs32.l)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RlRl(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_l16_ll(rt, rs) +} + +/// `Rd32=sub(Rt32.l,Rs32.h):sat` +/// +/// 
Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RlRh_sat(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_l16_sat_hl(rt, rs) +} + +/// `Rd32=sub(Rt32.l,Rs32.l):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RlRl_sat(rt: i32, rs: i32) -> i32 { + hexagon_A2_subh_l16_sat_ll(rt, rs) +} + +/// `Rdd32=sub(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_sub_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_subp(rtt, rss) +} + +/// `Rd32=sub(#s10,Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[cfg_attr(test, assert_instr(sub, IS10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_IR(rs: i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_A2_subri(IS10, rs) +} + +/// `Rd32=sub(Rt32,Rs32):sat` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_RR_sat(rt: i32, rs: i32) -> i32 { + hexagon_A2_subsat(rt, rs) +} + +/// `Rd32=vaddh(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vaddh_RR(rs: i32, rt: i32) -> i32 { + hexagon_A2_svaddh(rs, rt) +} + +/// `Rd32=vaddh(Rs32,Rt32):sat` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(vaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vaddh_RR_sat(rs: i32, rt: i32) -> i32 { + hexagon_A2_svaddhs(rs, rt) +} + +/// `Rd32=vadduh(Rs32,Rt32):sat` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vadduh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vadduh_RR_sat(rs: i32, rt: i32) -> i32 { + hexagon_A2_svadduhs(rs, rt) +} + +/// `Rd32=vavgh(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vavgh_RR(rs: i32, rt: i32) -> i32 { + hexagon_A2_svavgh(rs, rt) +} + +/// `Rd32=vavgh(Rs32,Rt32):rnd` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vavgh_RR_rnd(rs: i32, rt: i32) -> i32 { + hexagon_A2_svavghs(rs, rt) +} + +/// `Rd32=vnavgh(Rt32,Rs32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vnavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vnavgh_RR(rt: i32, rs: i32) -> i32 { + hexagon_A2_svnavgh(rt, rs) +} + +/// `Rd32=vsubh(Rt32,Rs32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vsubh_RR(rt: i32, rs: i32) -> i32 { + hexagon_A2_svsubh(rt, rs) +} + +/// `Rd32=vsubh(Rt32,Rs32):sat` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
Q6_R_vsubh_RR_sat(rt: i32, rs: i32) -> i32 { + hexagon_A2_svsubhs(rt, rs) +} + +/// `Rd32=vsubuh(Rt32,Rs32):sat` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vsubuh_RR_sat(rt: i32, rs: i32) -> i32 { + hexagon_A2_svsubuhs(rt, rs) +} + +/// `Rd32=swiz(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(swiz))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_swiz_R(rs: i32) -> i32 { + hexagon_A2_swiz(rs) +} + +/// `Rd32=sxtb(Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(sxtb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sxtb_R(rs: i32) -> i32 { + hexagon_A2_sxtb(rs) +} + +/// `Rd32=sxth(Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(sxth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sxth_R(rs: i32) -> i32 { + hexagon_A2_sxth(rs) +} + +/// `Rdd32=sxtw(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sxtw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_sxtw_R(rs: i32) -> i64 { + hexagon_A2_sxtw(rs) +} + +/// `Rd32=Rs32` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_equals_R(rs: i32) -> i32 { + hexagon_A2_tfr(rs) +} + +/// `Rx32.h=#u16` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_Rh_equals_I(rx: i32) -> 
i32 { + static_assert_uimm_bits!(IU16, 16); + hexagon_A2_tfrih(rx, IU16 as i32) +} + +/// `Rx32.l=#u16` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_Rl_equals_I(rx: i32) -> i32 { + static_assert_uimm_bits!(IU16, 16); + hexagon_A2_tfril(rx, IU16 as i32) +} + +/// `Rdd32=Rss32` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_equals_P(rss: i64) -> i64 { + hexagon_A2_tfrp(rss) +} + +/// `Rdd32=#s8` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_equals_I() -> i64 { + static_assert_simm_bits!(IS8, 8); + hexagon_A2_tfrpi(IS8) +} + +/// `Rd32=#s16` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_equals_I() -> i32 { + static_assert_simm_bits!(IS16, 16); + hexagon_A2_tfrsi(IS16) +} + +/// `Rdd32=vabsh(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vabsh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vabsh_P(rss: i64) -> i64 { + hexagon_A2_vabsh(rss) +} + +/// `Rdd32=vabsh(Rss32):sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vabsh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vabsh_P_sat(rss: i64) -> i64 { + hexagon_A2_vabshsat(rss) +} + +/// `Rdd32=vabsw(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vabsw))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vabsw_P(rss: i64) -> i64 { + hexagon_A2_vabsw(rss) +} + +/// `Rdd32=vabsw(Rss32):sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vabsw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vabsw_P_sat(rss: i64) -> i64 { + hexagon_A2_vabswsat(rss) +} + +/// `Rdd32=vaddb(Rss32,Rtt32)` +/// +/// Instruction Type: MAPPING +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaddb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaddb_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vaddb_map(rss, rtt) +} + +/// `Rdd32=vaddh(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaddh_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vaddh(rss, rtt) +} + +/// `Rdd32=vaddh(Rss32,Rtt32):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaddh_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vaddhs(rss, rtt) +} + +/// `Rdd32=vaddub(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaddub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaddub_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vaddub(rss, rtt) +} + +/// `Rdd32=vaddub(Rss32,Rtt32):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaddub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaddub_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vaddubs(rss, rtt) +} + +/// 
`Rdd32=vadduh(Rss32,Rtt32):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vadduh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vadduh_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vadduhs(rss, rtt) +} + +/// `Rdd32=vaddw(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaddw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaddw_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vaddw(rss, rtt) +} + +/// `Rdd32=vaddw(Rss32,Rtt32):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaddw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaddw_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vaddws(rss, rtt) +} + +/// `Rdd32=vavgh(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavgh_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavgh(rss, rtt) +} + +/// `Rdd32=vavgh(Rss32,Rtt32):crnd` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavgh_PP_crnd(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavghcr(rss, rtt) +} + +/// `Rdd32=vavgh(Rss32,Rtt32):rnd` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavgh_PP_rnd(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavghr(rss, rtt) +} + +/// `Rdd32=vavgub(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(vavgub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavgub_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavgub(rss, rtt) +} + +/// `Rdd32=vavgub(Rss32,Rtt32):rnd` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavgub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavgub_PP_rnd(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavgubr(rss, rtt) +} + +/// `Rdd32=vavguh(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavguh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavguh_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavguh(rss, rtt) +} + +/// `Rdd32=vavguh(Rss32,Rtt32):rnd` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavguh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavguh_PP_rnd(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavguhr(rss, rtt) +} + +/// `Rdd32=vavguw(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavguw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavguw_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavguw(rss, rtt) +} + +/// `Rdd32=vavguw(Rss32,Rtt32):rnd` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavguw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavguw_PP_rnd(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavguwr(rss, rtt) +} + +/// `Rdd32=vavgw(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe 
fn Q6_P_vavgw_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavgw(rss, rtt) +} + +/// `Rdd32=vavgw(Rss32,Rtt32):crnd` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavgw_PP_crnd(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavgwcr(rss, rtt) +} + +/// `Rdd32=vavgw(Rss32,Rtt32):rnd` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vavgw_PP_rnd(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vavgwr(rss, rtt) +} + +/// `Pd4=vcmpb.eq(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpb_eq_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A2_vcmpbeq(rss, rtt) +} + +/// `Pd4=vcmpb.gtu(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpb_gtu_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A2_vcmpbgtu(rss, rtt) +} + +/// `Pd4=vcmph.eq(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmph))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmph_eq_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A2_vcmpheq(rss, rtt) +} + +/// `Pd4=vcmph.gt(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmph))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmph_gt_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A2_vcmphgt(rss, rtt) +} + +/// `Pd4=vcmph.gtu(Rss32,Rtt32)` 
+/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmph))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmph_gtu_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A2_vcmphgtu(rss, rtt) +} + +/// `Pd4=vcmpw.eq(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpw_eq_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A2_vcmpweq(rss, rtt) +} + +/// `Pd4=vcmpw.gt(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpw_gt_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A2_vcmpwgt(rss, rtt) +} + +/// `Pd4=vcmpw.gtu(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpw_gtu_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A2_vcmpwgtu(rss, rtt) +} + +/// `Rdd32=vconj(Rss32):sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vconj))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vconj_P_sat(rss: i64) -> i64 { + hexagon_A2_vconj(rss) +} + +/// `Rdd32=vmaxb(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmaxb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmaxb_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vmaxb(rtt, rss) +} + +/// `Rdd32=vmaxh(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmaxh))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmaxh_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vmaxh(rtt, rss) +} + +/// `Rdd32=vmaxub(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmaxub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmaxub_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vmaxub(rtt, rss) +} + +/// `Rdd32=vmaxuh(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmaxuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmaxuh_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vmaxuh(rtt, rss) +} + +/// `Rdd32=vmaxuw(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmaxuw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmaxuw_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vmaxuw(rtt, rss) +} + +/// `Rdd32=vmaxw(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmaxw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmaxw_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vmaxw(rtt, rss) +} + +/// `Rdd32=vminb(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vminb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vminb_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vminb(rtt, rss) +} + +/// `Rdd32=vminh(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vminh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vminh_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vminh(rtt, rss) +} + +/// 
`Rdd32=vminub(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vminub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vminub_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vminub(rtt, rss) +} + +/// `Rdd32=vminuh(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vminuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vminuh_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vminuh(rtt, rss) +} + +/// `Rdd32=vminuw(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vminuw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vminuw_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vminuw(rtt, rss) +} + +/// `Rdd32=vminw(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vminw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vminw_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vminw(rtt, rss) +} + +/// `Rdd32=vnavgh(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vnavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vnavgh_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vnavgh(rtt, rss) +} + +/// `Rdd32=vnavgh(Rtt32,Rss32):crnd:sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vnavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vnavgh_PP_crnd_sat(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vnavghcr(rtt, rss) +} + +/// `Rdd32=vnavgh(Rtt32,Rss32):rnd:sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(vnavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vnavgh_PP_rnd_sat(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vnavghr(rtt, rss) +} + +/// `Rdd32=vnavgw(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vnavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vnavgw_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vnavgw(rtt, rss) +} + +/// `Rdd32=vnavgw(Rtt32,Rss32):crnd:sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vnavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vnavgw_PP_crnd_sat(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vnavgwcr(rtt, rss) +} + +/// `Rdd32=vnavgw(Rtt32,Rss32):rnd:sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vnavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vnavgw_PP_rnd_sat(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vnavgwr(rtt, rss) +} + +/// `Rdd32=vraddub(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vraddub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vraddub_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vraddub(rss, rtt) +} + +/// `Rxx32+=vraddub(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vraddub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vraddubacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_A2_vraddub_acc(rxx, rss, rtt) +} + +/// `Rdd32=vrsadub(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrsadub))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrsadub_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vrsadub(rss, rtt) +} + +/// `Rxx32+=vrsadub(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrsadub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrsadubacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_A2_vrsadub_acc(rxx, rss, rtt) +} + +/// `Rdd32=vsubb(Rss32,Rtt32)` +/// +/// Instruction Type: MAPPING +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsubb_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_vsubb_map(rss, rtt) +} + +/// `Rdd32=vsubh(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsubh_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vsubh(rtt, rss) +} + +/// `Rdd32=vsubh(Rtt32,Rss32):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsubh_PP_sat(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vsubhs(rtt, rss) +} + +/// `Rdd32=vsubub(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsubub_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vsubub(rtt, rss) +} + +/// `Rdd32=vsubub(Rtt32,Rss32):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsubub_PP_sat(rtt: i64, rss: i64) -> i64 { + 
hexagon_A2_vsububs(rtt, rss) +} + +/// `Rdd32=vsubuh(Rtt32,Rss32):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsubuh_PP_sat(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vsubuhs(rtt, rss) +} + +/// `Rdd32=vsubw(Rtt32,Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsubw_PP(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vsubw(rtt, rss) +} + +/// `Rdd32=vsubw(Rtt32,Rss32):sat` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsubw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsubw_PP_sat(rtt: i64, rss: i64) -> i64 { + hexagon_A2_vsubws(rtt, rss) +} + +/// `Rd32=xor(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(xor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_xor_RR(rs: i32, rt: i32) -> i32 { + hexagon_A2_xor(rs, rt) +} + +/// `Rdd32=xor(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(xor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_xor_PP(rss: i64, rtt: i64) -> i64 { + hexagon_A2_xorp(rss, rtt) +} + +/// `Rd32=zxtb(Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(zxtb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_zxtb_R(rs: i32) -> i32 { + hexagon_A2_zxtb(rs) +} + +/// `Rd32=zxth(Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, 
assert_instr(zxth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_zxth_R(rs: i32) -> i32 { + hexagon_A2_zxth(rs) +} + +/// `Rd32=and(Rt32,~Rs32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_and_RnR(rt: i32, rs: i32) -> i32 { + hexagon_A4_andn(rt, rs) +} + +/// `Rdd32=and(Rtt32,~Rss32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_and_PnP(rtt: i64, rss: i64) -> i64 { + hexagon_A4_andnp(rtt, rss) +} + +/// `Rdd32=bitsplit(Rs32,Rt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(bitsplit))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_bitsplit_RR(rs: i32, rt: i32) -> i64 { + hexagon_A4_bitsplit(rs, rt) +} + +/// `Rdd32=bitsplit(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(bitsplit, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_bitsplit_RI(rs: i32) -> i64 { + static_assert_uimm_bits!(IU5, 5); + hexagon_A4_bitspliti(rs, IU5 as i32) +} + +/// `Pd4=boundscheck(Rs32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(boundscheck))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_boundscheck_RP(rs: i32, rtt: i64) -> i32 { + hexagon_A4_boundscheck(rs, rtt) +} + +/// `Pd4=cmpb.eq(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] 
+pub unsafe fn Q6_p_cmpb_eq_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_cmpbeq(rs, rt) +} + +/// `Pd4=cmpb.eq(Rs32,#u8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmpb, IU8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmpb_eq_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + hexagon_A4_cmpbeqi(rs, IU8 as i32) +} + +/// `Pd4=cmpb.gt(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmpb_gt_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_cmpbgt(rs, rt) +} + +/// `Pd4=cmpb.gt(Rs32,#s8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmpb, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmpb_gt_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_cmpbgti(rs, IS8) +} + +/// `Pd4=cmpb.gtu(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmpb_gtu_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_cmpbgtu(rs, rt) +} + +/// `Pd4=cmpb.gtu(Rs32,#u7)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmpb, IU7 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmpb_gtu_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU7, 7); + hexagon_A4_cmpbgtui(rs, IU7 as i32) +} + +/// `Pd4=cmph.eq(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmph))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmph_eq_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_cmpheq(rs, rt) +} + +/// `Pd4=cmph.eq(Rs32,#s8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmph, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmph_eq_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_cmpheqi(rs, IS8) +} + +/// `Pd4=cmph.gt(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmph))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmph_gt_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_cmphgt(rs, rt) +} + +/// `Pd4=cmph.gt(Rs32,#s8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmph, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmph_gt_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_cmphgti(rs, IS8) +} + +/// `Pd4=cmph.gtu(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmph))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmph_gtu_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_cmphgtu(rs, rt) +} + +/// `Pd4=cmph.gtu(Rs32,#u7)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmph, IU7 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmph_gtu_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU7, 7); + hexagon_A4_cmphgtui(rs, IU7 as i32) +} + +/// `Rdd32=combine(#s8,Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[cfg_attr(test, assert_instr(combine, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_combine_IR(rs: i32) -> i64 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_combineir(IS8, rs) +} + +/// `Rdd32=combine(Rs32,#s8)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(combine, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_combine_RI(rs: i32) -> i64 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_combineri(rs, IS8) +} + +/// `Rd32=cround(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cround, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cround_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_A4_cround_ri(rs, IU5 as i32) +} + +/// `Rd32=cround(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cround))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cround_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_cround_rr(rs, rt) +} + +/// `Rd32=modwrap(Rs32,Rt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(modwrap))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_modwrap_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_modwrapu(rs, rt) +} + +/// `Rd32=or(Rt32,~Rs32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_or_RnR(rt: i32, rs: i32) -> i32 { + hexagon_A4_orn(rt, rs) +} + +/// `Rdd32=or(Rtt32,~Rss32)` 
+/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_or_PnP(rtt: i64, rss: i64) -> i64 { + hexagon_A4_ornp(rtt, rss) +} + +/// `Rd32=cmp.eq(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmp_eq_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_rcmpeq(rs, rt) +} + +/// `Rd32=cmp.eq(Rs32,#s8)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmp, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmp_eq_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_rcmpeqi(rs, IS8) +} + +/// `Rd32=!cmp.eq(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_not_cmp_eq_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_rcmpneq(rs, rt) +} + +/// `Rd32=!cmp.eq(Rs32,#s8)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_not_cmp_eq_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_rcmpneqi(rs, IS8) +} + +/// `Rd32=round(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(round, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_round_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_A4_round_ri(rs, IU5 as i32) +} + +/// `Rd32=round(Rs32,#u5):sat` +/// +/// Instruction 
Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(round, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_round_RI_sat(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_A4_round_ri_sat(rs, IU5 as i32) +} + +/// `Rd32=round(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(round))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_round_RR(rs: i32, rt: i32) -> i32 { + hexagon_A4_round_rr(rs, rt) +} + +/// `Rd32=round(Rs32,Rt32):sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(round))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_round_RR_sat(rs: i32, rt: i32) -> i32 { + hexagon_A4_round_rr_sat(rs, rt) +} + +/// `Pd4=tlbmatch(Rss32,Rt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(tlbmatch))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_tlbmatch_PR(rss: i64, rt: i32) -> i32 { + hexagon_A4_tlbmatch(rss, rt) +} + +/// `Pd4=any8(vcmpb.eq(Rss32,Rtt32))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(any8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_any8_vcmpb_eq_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A4_vcmpbeq_any(rss, rtt) +} + +/// `Pd4=vcmpb.eq(Rss32,#u8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vcmpb, IU8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpb_eq_PI(rss: i64) -> i32 { + static_assert_uimm_bits!(IU8, 8); + hexagon_A4_vcmpbeqi(rss, IU8 as i32) +} + +/// 
`Pd4=vcmpb.gt(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpb_gt_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A4_vcmpbgt(rss, rtt) +} + +/// `Pd4=vcmpb.gt(Rss32,#s8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vcmpb, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpb_gt_PI(rss: i64) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_vcmpbgti(rss, IS8) +} + +/// `Pd4=vcmpb.gtu(Rss32,#u7)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vcmpb, IU7 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpb_gtu_PI(rss: i64) -> i32 { + static_assert_uimm_bits!(IU7, 7); + hexagon_A4_vcmpbgtui(rss, IU7 as i32) +} + +/// `Pd4=vcmph.eq(Rss32,#s8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vcmph, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmph_eq_PI(rss: i64) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_vcmpheqi(rss, IS8) +} + +/// `Pd4=vcmph.gt(Rss32,#s8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vcmph, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmph_gt_PI(rss: i64) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_vcmphgti(rss, IS8) +} + +/// `Pd4=vcmph.gtu(Rss32,#u7)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] 
+#[cfg_attr(test, assert_instr(vcmph, IU7 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmph_gtu_PI(rss: i64) -> i32 { + static_assert_uimm_bits!(IU7, 7); + hexagon_A4_vcmphgtui(rss, IU7 as i32) +} + +/// `Pd4=vcmpw.eq(Rss32,#s8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vcmpw, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpw_eq_PI(rss: i64) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_vcmpweqi(rss, IS8) +} + +/// `Pd4=vcmpw.gt(Rss32,#s8)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vcmpw, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpw_gt_PI(rss: i64) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_A4_vcmpwgti(rss, IS8) +} + +/// `Pd4=vcmpw.gtu(Rss32,#u7)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vcmpw, IU7 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_vcmpw_gtu_PI(rss: i64) -> i32 { + static_assert_uimm_bits!(IU7, 7); + hexagon_A4_vcmpwgtui(rss, IU7 as i32) +} + +/// `Rxx32=vrmaxh(Rss32,Ru32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmaxh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmaxh_PR(rxx: i64, rss: i64, ru: i32) -> i64 { + hexagon_A4_vrmaxh(rxx, rss, ru) +} + +/// `Rxx32=vrmaxuh(Rss32,Ru32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmaxuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmaxuh_PR(rxx: i64, rss: i64, ru: i32) -> 
i64 { + hexagon_A4_vrmaxuh(rxx, rss, ru) +} + +/// `Rxx32=vrmaxuw(Rss32,Ru32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmaxuw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmaxuw_PR(rxx: i64, rss: i64, ru: i32) -> i64 { + hexagon_A4_vrmaxuw(rxx, rss, ru) +} + +/// `Rxx32=vrmaxw(Rss32,Ru32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmaxw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmaxw_PR(rxx: i64, rss: i64, ru: i32) -> i64 { + hexagon_A4_vrmaxw(rxx, rss, ru) +} + +/// `Rxx32=vrminh(Rss32,Ru32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrminh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrminh_PR(rxx: i64, rss: i64, ru: i32) -> i64 { + hexagon_A4_vrminh(rxx, rss, ru) +} + +/// `Rxx32=vrminuh(Rss32,Ru32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrminuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrminuh_PR(rxx: i64, rss: i64, ru: i32) -> i64 { + hexagon_A4_vrminuh(rxx, rss, ru) +} + +/// `Rxx32=vrminuw(Rss32,Ru32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrminuw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrminuw_PR(rxx: i64, rss: i64, ru: i32) -> i64 { + hexagon_A4_vrminuw(rxx, rss, ru) +} + +/// `Rxx32=vrminw(Rss32,Ru32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrminw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrminw_PR(rxx: i64, rss: i64, ru: i32) -> i64 { + hexagon_A4_vrminw(rxx, rss, ru) +} + +/// 
`Rd32=vaddhub(Rss32,Rtt32):sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaddhub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vaddhub_PP_sat(rss: i64, rtt: i64) -> i32 { + hexagon_A5_vaddhubs(rss, rtt) +} + +/// `Pd4=all8(Ps4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(all8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_all8_p(ps: i32) -> i32 { + hexagon_C2_all8(ps) +} + +/// `Pd4=and(Pt4,Ps4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_and_pp(pt: i32, ps: i32) -> i32 { + hexagon_C2_and(pt, ps) +} + +/// `Pd4=and(Pt4,!Ps4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_and_pnp(pt: i32, ps: i32) -> i32 { + hexagon_C2_andn(pt, ps) +} + +/// `Pd4=any8(Ps4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(any8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_any8_p(ps: i32) -> i32 { + hexagon_C2_any8(ps) +} + +/// `Pd4=bitsclr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(bitsclr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_bitsclr_RR(rs: i32, rt: i32) -> i32 { + hexagon_C2_bitsclr(rs, rt) +} + +/// `Pd4=bitsclr(Rs32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(bitsclr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub 
unsafe fn Q6_p_bitsclr_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU6, 6); + hexagon_C2_bitsclri(rs, IU6 as i32) +} + +/// `Pd4=bitsset(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(bitsset))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_bitsset_RR(rs: i32, rt: i32) -> i32 { + hexagon_C2_bitsset(rs, rt) +} + +/// `Pd4=cmp.eq(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_eq_RR(rs: i32, rt: i32) -> i32 { + hexagon_C2_cmpeq(rs, rt) +} + +/// `Pd4=cmp.eq(Rs32,#s10)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmp, IS10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_eq_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_C2_cmpeqi(rs, IS10) +} + +/// `Pd4=cmp.eq(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_eq_PP(rss: i64, rtt: i64) -> i32 { + hexagon_C2_cmpeqp(rss, rtt) +} + +/// `Pd4=cmp.ge(Rs32,#s8)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmp, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_ge_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_C2_cmpgei(rs, IS8) +} + +/// `Pd4=cmp.geu(Rs32,#u8)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmp, IU8 = 0))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_geu_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + hexagon_C2_cmpgeui(rs, IU8 as i32) +} + +/// `Pd4=cmp.gt(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_gt_RR(rs: i32, rt: i32) -> i32 { + hexagon_C2_cmpgt(rs, rt) +} + +/// `Pd4=cmp.gt(Rs32,#s10)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmp, IS10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_gt_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_C2_cmpgti(rs, IS10) +} + +/// `Pd4=cmp.gt(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_gt_PP(rss: i64, rtt: i64) -> i32 { + hexagon_C2_cmpgtp(rss, rtt) +} + +/// `Pd4=cmp.gtu(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_gtu_RR(rs: i32, rt: i32) -> i32 { + hexagon_C2_cmpgtu(rs, rt) +} + +/// `Pd4=cmp.gtu(Rs32,#u9)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cmp, IU9 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_gtu_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU9, 9); + hexagon_C2_cmpgtui(rs, IU9 as i32) +} + +/// `Pd4=cmp.gtu(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(cmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_gtu_PP(rss: i64, rtt: i64) -> i32 { + hexagon_C2_cmpgtup(rss, rtt) +} + +/// `Pd4=cmp.lt(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_lt_RR(rs: i32, rt: i32) -> i32 { + hexagon_C2_cmplt(rs, rt) +} + +/// `Pd4=cmp.ltu(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_cmp_ltu_RR(rs: i32, rt: i32) -> i32 { + hexagon_C2_cmpltu(rs, rt) +} + +/// `Rdd32=mask(Pt4)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mask))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mask_p(pt: i32) -> i64 { + hexagon_C2_mask(pt) +} + +/// `Rd32=mux(Pu4,Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(mux))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mux_pRR(pu: i32, rs: i32, rt: i32) -> i32 { + hexagon_C2_mux(pu, rs, rt) +} + +/// `Rd32=mux(Pu4,#s8,#S8)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1, 2)] +#[cfg_attr(test, assert_instr(mux, IS8 = 0, IS8_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mux_pII(pu: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + static_assert_simm_bits!(IS8_2, 8); + hexagon_C2_muxii(pu, IS8, IS8_2) +} + +/// `Rd32=mux(Pu4,Rs32,#s8)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, 
assert_instr(mux, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mux_pRI(pu: i32, rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_C2_muxir(pu, rs, IS8) +} + +/// `Rd32=mux(Pu4,#s8,Rs32)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(mux, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mux_pIR(pu: i32, rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_C2_muxri(pu, IS8, rs) +} + +/// `Pd4=not(Ps4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(not))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_p(ps: i32) -> i32 { + hexagon_C2_not(ps) +} + +/// `Pd4=or(Pt4,Ps4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_or_pp(pt: i32, ps: i32) -> i32 { + hexagon_C2_or(pt, ps) +} + +/// `Pd4=or(Pt4,!Ps4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_or_pnp(pt: i32, ps: i32) -> i32 { + hexagon_C2_orn(pt, ps) +} + +/// `Pd4=Ps4` +/// +/// Instruction Type: MAPPING +/// Execution Slots: SLOT0123 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_equals_p(ps: i32) -> i32 { + hexagon_C2_pxfer_map(ps) +} + +/// `Rd32=Ps4` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_equals_p(ps: i32) -> i32 { + hexagon_C2_tfrpr(ps) +} + +/// `Pd4=Rs32` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 
+#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_equals_R(rs: i32) -> i32 { + hexagon_C2_tfrrp(rs) +} + +/// `Rd32=vitpack(Ps4,Pt4)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vitpack))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vitpack_pp(ps: i32, pt: i32) -> i32 { + hexagon_C2_vitpack(ps, pt) +} + +/// `Rdd32=vmux(Pu4,Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmux))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmux_pPP(pu: i32, rss: i64, rtt: i64) -> i64 { + hexagon_C2_vmux(pu, rss, rtt) +} + +/// `Pd4=xor(Ps4,Pt4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(xor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_xor_pp(ps: i32, pt: i32) -> i32 { + hexagon_C2_xor(ps, pt) +} + +/// `Pd4=and(Ps4,and(Pt4,Pu4))` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_and_and_ppp(ps: i32, pt: i32, pu: i32) -> i32 { + hexagon_C4_and_and(ps, pt, pu) +} + +/// `Pd4=and(Ps4,and(Pt4,!Pu4))` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_and_and_ppnp(ps: i32, pt: i32, pu: i32) -> i32 { + hexagon_C4_and_andn(ps, pt, pu) +} + +/// `Pd4=and(Ps4,or(Pt4,Pu4))` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_and_or_ppp(ps: i32, pt: i32, pu: i32) -> i32 { + hexagon_C4_and_or(ps, 
pt, pu) +} + +/// `Pd4=and(Ps4,or(Pt4,!Pu4))` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_and_or_ppnp(ps: i32, pt: i32, pu: i32) -> i32 { + hexagon_C4_and_orn(ps, pt, pu) +} + +/// `Pd4=!cmp.gt(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_cmp_gt_RR(rs: i32, rt: i32) -> i32 { + hexagon_C4_cmplte(rs, rt) +} + +/// `Pd4=!cmp.gt(Rs32,#s10)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_cmp_gt_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_C4_cmpltei(rs, IS10) +} + +/// `Pd4=!cmp.gtu(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_cmp_gtu_RR(rs: i32, rt: i32) -> i32 { + hexagon_C4_cmplteu(rs, rt) +} + +/// `Pd4=!cmp.gtu(Rs32,#u9)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_cmp_gtu_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU9, 9); + hexagon_C4_cmplteui(rs, IU9 as i32) +} + +/// `Pd4=!cmp.eq(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_cmp_eq_RR(rs: i32, rt: i32) -> i32 { + hexagon_C4_cmpneq(rs, rt) +} + +/// `Pd4=!cmp.eq(Rs32,#s10)` +/// +/// Instruction Type: ALU32_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_cmp_eq_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_C4_cmpneqi(rs, IS10) +} + +/// `Pd4=fastcorner9(Ps4,Pt4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(fastcorner9))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_fastcorner9_pp(ps: i32, pt: i32) -> i32 { + hexagon_C4_fastcorner9(ps, pt) +} + +/// `Pd4=!fastcorner9(Ps4,Pt4)` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_fastcorner9_pp(ps: i32, pt: i32) -> i32 { + hexagon_C4_fastcorner9_not(ps, pt) +} + +/// `Pd4=!bitsclr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_bitsclr_RR(rs: i32, rt: i32) -> i32 { + hexagon_C4_nbitsclr(rs, rt) +} + +/// `Pd4=!bitsclr(Rs32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_bitsclr_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU6, 6); + hexagon_C4_nbitsclri(rs, IU6 as i32) +} + +/// `Pd4=!bitsset(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_bitsset_RR(rs: i32, rt: i32) -> i32 { + hexagon_C4_nbitsset(rs, rt) +} + +/// `Pd4=or(Ps4,and(Pt4,Pu4))` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_or_and_ppp(ps: i32, pt: i32, pu: i32) -> i32 { + hexagon_C4_or_and(ps, pt, pu) +} + +/// 
`Pd4=or(Ps4,and(Pt4,!Pu4))` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_or_and_ppnp(ps: i32, pt: i32, pu: i32) -> i32 { + hexagon_C4_or_andn(ps, pt, pu) +} + +/// `Pd4=or(Ps4,or(Pt4,Pu4))` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_or_or_ppp(ps: i32, pt: i32, pu: i32) -> i32 { + hexagon_C4_or_or(ps, pt, pu) +} + +/// `Pd4=or(Ps4,or(Pt4,!Pu4))` +/// +/// Instruction Type: CR +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_or_or_ppnp(ps: i32, pt: i32, pu: i32) -> i32 { + hexagon_C4_or_orn(ps, pt, pu) +} + +/// `Rdd32=convert_d2df(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_d2df))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_d2df_P(rss: i64) -> f64 { + hexagon_F2_conv_d2df(rss) +} + +/// `Rd32=convert_d2sf(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_d2sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_d2sf_P(rss: i64) -> f32 { + hexagon_F2_conv_d2sf(rss) +} + +/// `Rdd32=convert_df2d(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_df2d))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_df2d_P(rss: f64) -> i64 { + hexagon_F2_conv_df2d(rss) +} + +/// `Rdd32=convert_df2d(Rss32):chop` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, 
assert_instr(convert_df2d))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_df2d_P_chop(rss: f64) -> i64 { + hexagon_F2_conv_df2d_chop(rss) +} + +/// `Rd32=convert_df2sf(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_df2sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_df2sf_P(rss: f64) -> f32 { + hexagon_F2_conv_df2sf(rss) +} + +/// `Rdd32=convert_df2ud(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_df2ud))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_df2ud_P(rss: f64) -> i64 { + hexagon_F2_conv_df2ud(rss) +} + +/// `Rdd32=convert_df2ud(Rss32):chop` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_df2ud))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_df2ud_P_chop(rss: f64) -> i64 { + hexagon_F2_conv_df2ud_chop(rss) +} + +/// `Rd32=convert_df2uw(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_df2uw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_df2uw_P(rss: f64) -> i32 { + hexagon_F2_conv_df2uw(rss) +} + +/// `Rd32=convert_df2uw(Rss32):chop` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_df2uw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_df2uw_P_chop(rss: f64) -> i32 { + hexagon_F2_conv_df2uw_chop(rss) +} + +/// `Rd32=convert_df2w(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_df2w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] 
+pub unsafe fn Q6_R_convert_df2w_P(rss: f64) -> i32 { + hexagon_F2_conv_df2w(rss) +} + +/// `Rd32=convert_df2w(Rss32):chop` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_df2w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_df2w_P_chop(rss: f64) -> i32 { + hexagon_F2_conv_df2w_chop(rss) +} + +/// `Rdd32=convert_sf2d(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_sf2d))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_sf2d_R(rs: f32) -> i64 { + hexagon_F2_conv_sf2d(rs) +} + +/// `Rdd32=convert_sf2d(Rs32):chop` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_sf2d))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_sf2d_R_chop(rs: f32) -> i64 { + hexagon_F2_conv_sf2d_chop(rs) +} + +/// `Rdd32=convert_sf2df(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_sf2df))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_sf2df_R(rs: f32) -> f64 { + hexagon_F2_conv_sf2df(rs) +} + +/// `Rdd32=convert_sf2ud(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_sf2ud))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_sf2ud_R(rs: f32) -> i64 { + hexagon_F2_conv_sf2ud(rs) +} + +/// `Rdd32=convert_sf2ud(Rs32):chop` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_sf2ud))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_sf2ud_R_chop(rs: f32) -> i64 { + hexagon_F2_conv_sf2ud_chop(rs) +} + +/// 
`Rd32=convert_sf2uw(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_sf2uw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_sf2uw_R(rs: f32) -> i32 { + hexagon_F2_conv_sf2uw(rs) +} + +/// `Rd32=convert_sf2uw(Rs32):chop` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_sf2uw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_sf2uw_R_chop(rs: f32) -> i32 { + hexagon_F2_conv_sf2uw_chop(rs) +} + +/// `Rd32=convert_sf2w(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_sf2w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_sf2w_R(rs: f32) -> i32 { + hexagon_F2_conv_sf2w(rs) +} + +/// `Rd32=convert_sf2w(Rs32):chop` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_sf2w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_sf2w_R_chop(rs: f32) -> i32 { + hexagon_F2_conv_sf2w_chop(rs) +} + +/// `Rdd32=convert_ud2df(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_ud2df))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_ud2df_P(rss: i64) -> f64 { + hexagon_F2_conv_ud2df(rss) +} + +/// `Rd32=convert_ud2sf(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_ud2sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_ud2sf_P(rss: i64) -> f32 { + hexagon_F2_conv_ud2sf(rss) +} + +/// `Rdd32=convert_uw2df(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(convert_uw2df))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_uw2df_R(rs: i32) -> f64 { + hexagon_F2_conv_uw2df(rs) +} + +/// `Rd32=convert_uw2sf(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_uw2sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_uw2sf_R(rs: i32) -> f32 { + hexagon_F2_conv_uw2sf(rs) +} + +/// `Rdd32=convert_w2df(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_w2df))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_convert_w2df_R(rs: i32) -> f64 { + hexagon_F2_conv_w2df(rs) +} + +/// `Rd32=convert_w2sf(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(convert_w2sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_convert_w2sf_R(rs: i32) -> f32 { + hexagon_F2_conv_w2sf(rs) +} + +/// `Pd4=dfclass(Rss32,#u5)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(dfclass, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_dfclass_PI(rss: f64) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_F2_dfclass(rss, IU5 as i32) +} + +/// `Pd4=dfcmp.eq(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(dfcmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_dfcmp_eq_PP(rss: f64, rtt: f64) -> i32 { + hexagon_F2_dfcmpeq(rss, rtt) +} + +/// `Pd4=dfcmp.ge(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(dfcmp))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_dfcmp_ge_PP(rss: f64, rtt: f64) -> i32 { + hexagon_F2_dfcmpge(rss, rtt) +} + +/// `Pd4=dfcmp.gt(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(dfcmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_dfcmp_gt_PP(rss: f64, rtt: f64) -> i32 { + hexagon_F2_dfcmpgt(rss, rtt) +} + +/// `Pd4=dfcmp.uo(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(dfcmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_dfcmp_uo_PP(rss: f64, rtt: f64) -> i32 { + hexagon_F2_dfcmpuo(rss, rtt) +} + +/// `Rdd32=dfmake(#u10):neg` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[cfg_attr(test, assert_instr(dfmake, IU10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfmake_I_neg() -> f64 { + static_assert_uimm_bits!(IU10, 10); + hexagon_F2_dfimm_n(IU10 as i32) +} + +/// `Rdd32=dfmake(#u10):pos` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[cfg_attr(test, assert_instr(dfmake, IU10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfmake_I_pos() -> f64 { + static_assert_uimm_bits!(IU10, 10); + hexagon_F2_dfimm_p(IU10 as i32) +} + +/// `Rd32=sfadd(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfadd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfadd_RR(rs: f32, rt: f32) -> f32 { + hexagon_F2_sfadd(rs, rt) +} + +/// `Pd4=sfclass(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(sfclass, 
IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_sfclass_RI(rs: f32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_F2_sfclass(rs, IU5 as i32) +} + +/// `Pd4=sfcmp.eq(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfcmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_sfcmp_eq_RR(rs: f32, rt: f32) -> i32 { + hexagon_F2_sfcmpeq(rs, rt) +} + +/// `Pd4=sfcmp.ge(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfcmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_sfcmp_ge_RR(rs: f32, rt: f32) -> i32 { + hexagon_F2_sfcmpge(rs, rt) +} + +/// `Pd4=sfcmp.gt(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfcmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_sfcmp_gt_RR(rs: f32, rt: f32) -> i32 { + hexagon_F2_sfcmpgt(rs, rt) +} + +/// `Pd4=sfcmp.uo(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfcmp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_sfcmp_uo_RR(rs: f32, rt: f32) -> i32 { + hexagon_F2_sfcmpuo(rs, rt) +} + +/// `Rd32=sffixupd(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sffixupd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sffixupd_RR(rs: f32, rt: f32) -> f32 { + hexagon_F2_sffixupd(rs, rt) +} + +/// `Rd32=sffixupn(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sffixupn))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sffixupn_RR(rs: f32, rt: f32) -> f32 
{ + hexagon_F2_sffixupn(rs, rt) +} + +/// `Rd32=sffixupr(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sffixupr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sffixupr_R(rs: f32) -> f32 { + hexagon_F2_sffixupr(rs) +} + +/// `Rx32+=sfmpy(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmpyacc_RR(rx: f32, rs: f32, rt: f32) -> f32 { + hexagon_F2_sffma(rx, rs, rt) +} + +/// `Rx32+=sfmpy(Rs32,Rt32):lib` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmpyacc_RR_lib(rx: f32, rs: f32, rt: f32) -> f32 { + hexagon_F2_sffma_lib(rx, rs, rt) +} + +/// `Rx32+=sfmpy(Rs32,Rt32,Pu4):scale` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmpyacc_RRp_scale(rx: f32, rs: f32, rt: f32, pu: i32) -> f32 { + hexagon_F2_sffma_sc(rx, rs, rt, pu) +} + +/// `Rx32-=sfmpy(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmpynac_RR(rx: f32, rs: f32, rt: f32) -> f32 { + hexagon_F2_sffms(rx, rs, rt) +} + +/// `Rx32-=sfmpy(Rs32,Rt32):lib` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmpynac_RR_lib(rx: f32, rs: f32, rt: f32) -> f32 { + hexagon_F2_sffms_lib(rx, rs, rt) +} + +/// `Rd32=sfmake(#u10):neg` +/// +/// Instruction 
Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[cfg_attr(test, assert_instr(sfmake, IU10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmake_I_neg() -> f32 { + static_assert_uimm_bits!(IU10, 10); + hexagon_F2_sfimm_n(IU10 as i32) +} + +/// `Rd32=sfmake(#u10):pos` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[cfg_attr(test, assert_instr(sfmake, IU10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmake_I_pos() -> f32 { + static_assert_uimm_bits!(IU10, 10); + hexagon_F2_sfimm_p(IU10 as i32) +} + +/// `Rd32=sfmax(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfmax))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmax_RR(rs: f32, rt: f32) -> f32 { + hexagon_F2_sfmax(rs, rt) +} + +/// `Rd32=sfmin(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfmin))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmin_RR(rs: f32, rt: f32) -> f32 { + hexagon_F2_sfmin(rs, rt) +} + +/// `Rd32=sfmpy(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfmpy_RR(rs: f32, rt: f32) -> f32 { + hexagon_F2_sfmpy(rs, rt) +} + +/// `Rd32=sfsub(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sfsub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sfsub_RR(rs: f32, rt: f32) -> f32 { + hexagon_F2_sfsub(rs, rt) +} + +/// `Rx32+=add(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_addacc_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_acci(rx, rs, rt) +} + +/// `Rx32+=add(Rs32,#s8)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(add, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_addacc_RI(rx: i32, rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_M2_accii(rx, rs, IS8) +} + +/// `Rxx32+=cmpyi(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyiacc_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cmaci_s0(rxx, rs, rt) +} + +/// `Rxx32+=cmpyr(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpyr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyracc_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cmacr_s0(rxx, rs, rt) +} + +/// `Rxx32+=cmpy(Rs32,Rt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyacc_RR_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cmacs_s0(rxx, rs, rt) +} + +/// `Rxx32+=cmpy(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyacc_RR_s1_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cmacs_s1(rxx, rs, rt) +} + +/// `Rxx32+=cmpy(Rs32,Rt32*):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, 
assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyacc_RR_conj_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cmacsc_s0(rxx, rs, rt) +} + +/// `Rxx32+=cmpy(Rs32,Rt32*):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyacc_RR_conj_s1_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cmacsc_s1(rxx, rs, rt) +} + +/// `Rdd32=cmpyi(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyi_RR(rs: i32, rt: i32) -> i64 { + hexagon_M2_cmpyi_s0(rs, rt) +} + +/// `Rdd32=cmpyr(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpyr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyr_RR(rs: i32, rt: i32) -> i64 { + hexagon_M2_cmpyr_s0(rs, rt) +} + +/// `Rd32=cmpy(Rs32,Rt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpy_RR_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_cmpyrs_s0(rs, rt) +} + +/// `Rd32=cmpy(Rs32,Rt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpy_RR_s1_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_cmpyrs_s1(rs, rt) +} + +/// `Rd32=cmpy(Rs32,Rt32*):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
Q6_R_cmpy_RR_conj_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_cmpyrsc_s0(rs, rt) +} + +/// `Rd32=cmpy(Rs32,Rt32*):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpy_RR_conj_s1_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_cmpyrsc_s1(rs, rt) +} + +/// `Rdd32=cmpy(Rs32,Rt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpy_RR_sat(rs: i32, rt: i32) -> i64 { + hexagon_M2_cmpys_s0(rs, rt) +} + +/// `Rdd32=cmpy(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpy_RR_s1_sat(rs: i32, rt: i32) -> i64 { + hexagon_M2_cmpys_s1(rs, rt) +} + +/// `Rdd32=cmpy(Rs32,Rt32*):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpy_RR_conj_sat(rs: i32, rt: i32) -> i64 { + hexagon_M2_cmpysc_s0(rs, rt) +} + +/// `Rdd32=cmpy(Rs32,Rt32*):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpy_RR_conj_s1_sat(rs: i32, rt: i32) -> i64 { + hexagon_M2_cmpysc_s1(rs, rt) +} + +/// `Rxx32-=cmpy(Rs32,Rt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpynac_RR_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cnacs_s0(rxx, rs, rt) +} + +/// 
`Rxx32-=cmpy(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpynac_RR_s1_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cnacs_s1(rxx, rs, rt) +} + +/// `Rxx32-=cmpy(Rs32,Rt32*):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpynac_RR_conj_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cnacsc_s0(rxx, rs, rt) +} + +/// `Rxx32-=cmpy(Rs32,Rt32*):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpynac_RR_conj_s1_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_cnacsc_s1(rxx, rs, rt) +} + +/// `Rxx32+=mpy(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyacc_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_dpmpyss_acc_s0(rxx, rs, rt) +} + +/// `Rxx32-=mpy(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpynac_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_dpmpyss_nac_s0(rxx, rs, rt) +} + +/// `Rd32=mpy(Rs32,Rt32):rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RR_rnd(rs: i32, rt: i32) -> i32 { + hexagon_M2_dpmpyss_rnd_s0(rs, rt) +} + +/// `Rdd32=mpy(Rs32,Rt32)` +/// +/// Instruction Type: M +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RR(rs: i32, rt: i32) -> i64 { + hexagon_M2_dpmpyss_s0(rs, rt) +} + +/// `Rxx32+=mpyu(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyuacc_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_dpmpyuu_acc_s0(rxx, rs, rt) +} + +/// `Rxx32-=mpyu(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyunac_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_dpmpyuu_nac_s0(rxx, rs, rt) +} + +/// `Rdd32=mpyu(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyu_RR(rs: i32, rt: i32) -> i64 { + hexagon_M2_dpmpyuu_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32,Rt32.h):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RRh_s1_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_hmmpyh_rs1(rs, rt) +} + +/// `Rd32=mpy(Rs32,Rt32.h):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RRh_s1_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_hmmpyh_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32,Rt32.l):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RRl_s1_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_hmmpyl_rs1(rs, rt) +} + +/// `Rd32=mpy(Rs32,Rt32.l):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RRl_s1_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_hmmpyl_s1(rs, rt) +} + +/// `Rx32+=mpyi(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyiacc_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_maci(rx, rs, rt) +} + +/// `Rx32-=mpyi(Rs32,#u8)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(mpyi, IU8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyinac_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + hexagon_M2_macsin(rx, rs, IU8 as i32) +} + +/// `Rx32+=mpyi(Rs32,#u8)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(mpyi, IU8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyiacc_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + hexagon_M2_macsip(rx, rs, IU8 as i32) +} + +/// `Rxx32+=vmpywoh(Rss32,Rtt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywohacc_PP_rnd_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmachs_rs0(rxx, rss, rtt) +} + +/// `Rxx32+=vmpywoh(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywohacc_PP_s1_rnd_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmachs_rs1(rxx, rss, rtt) +} + +/// `Rxx32+=vmpywoh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywohacc_PP_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmachs_s0(rxx, rss, rtt) +} + +/// `Rxx32+=vmpywoh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywohacc_PP_s1_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmachs_s1(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyweh(Rss32,Rtt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywehacc_PP_rnd_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmacls_rs0(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyweh(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywehacc_PP_s1_rnd_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmacls_rs1(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyweh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywehacc_PP_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmacls_s0(rxx, rss, rtt) +} + +/// 
`Rxx32+=vmpyweh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywehacc_PP_s1_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmacls_s1(rxx, rss, rtt) +} + +/// `Rxx32+=vmpywouh(Rss32,Rtt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywouh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywouhacc_PP_rnd_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmacuhs_rs0(rxx, rss, rtt) +} + +/// `Rxx32+=vmpywouh(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywouh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywouhacc_PP_s1_rnd_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmacuhs_rs1(rxx, rss, rtt) +} + +/// `Rxx32+=vmpywouh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywouh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywouhacc_PP_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmacuhs_s0(rxx, rss, rtt) +} + +/// `Rxx32+=vmpywouh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywouh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywouhacc_PP_s1_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmacuhs_s1(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyweuh(Rss32,Rtt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
Q6_P_vmpyweuhacc_PP_rnd_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmaculs_rs0(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweuhacc_PP_s1_rnd_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmaculs_rs1(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyweuh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweuhacc_PP_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmaculs_s0(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweuhacc_PP_s1_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmaculs_s1(rxx, rss, rtt) +} + +/// `Rdd32=vmpywoh(Rss32,Rtt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywoh_PP_rnd_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyh_rs0(rss, rtt) +} + +/// `Rdd32=vmpywoh(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywoh_PP_s1_rnd_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyh_rs1(rss, rtt) +} + +/// `Rdd32=vmpywoh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywoh))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywoh_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyh_s0(rss, rtt) +} + +/// `Rdd32=vmpywoh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywoh_PP_s1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyh_s1(rss, rtt) +} + +/// `Rdd32=vmpyweh(Rss32,Rtt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweh_PP_rnd_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyl_rs0(rss, rtt) +} + +/// `Rdd32=vmpyweh(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweh_PP_s1_rnd_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyl_rs1(rss, rtt) +} + +/// `Rdd32=vmpyweh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweh_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyl_s0(rss, rtt) +} + +/// `Rdd32=vmpyweh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweh_PP_s1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyl_s1(rss, rtt) +} + +/// `Rdd32=vmpywouh(Rss32,Rtt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywouh))] +#[unstable(feature = "stdarch_hexagon", 
issue = "151523")] +pub unsafe fn Q6_P_vmpywouh_PP_rnd_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyuh_rs0(rss, rtt) +} + +/// `Rdd32=vmpywouh(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywouh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywouh_PP_s1_rnd_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyuh_rs1(rss, rtt) +} + +/// `Rdd32=vmpywouh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywouh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywouh_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyuh_s0(rss, rtt) +} + +/// `Rdd32=vmpywouh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpywouh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpywouh_PP_s1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyuh_s1(rss, rtt) +} + +/// `Rdd32=vmpyweuh(Rss32,Rtt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweuh_PP_rnd_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyul_rs0(rss, rtt) +} + +/// `Rdd32=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweuh_PP_s1_rnd_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyul_rs1(rss, rtt) +} + +/// `Rdd32=vmpyweuh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweuh))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn Q6_P_vmpyweuh_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyul_s0(rss, rtt) +} + +/// `Rdd32=vmpyweuh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyweuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyweuh_PP_s1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_mmpyul_s1(rss, rtt) +} + +/// `Rx32+=mpy(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RhRh(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_hh_s0(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RhRh_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_hh_s1(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RhRl(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_hl_s0(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RhRl_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_hl_s1(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RlRh(rx: i32, rs: i32, rt: 
i32) -> i32 { + hexagon_M2_mpy_acc_lh_s0(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RlRh_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_lh_s1(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RlRl(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_ll_s0(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RlRl_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_ll_s1(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.h,Rt32.h):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RhRh_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_sat_hh_s0(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.h,Rt32.h):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RhRh_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_sat_hh_s1(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.h,Rt32.l):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RhRl_sat(rx: i32, rs: i32, rt: i32) -> i32 { + 
hexagon_M2_mpy_acc_sat_hl_s0(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.h,Rt32.l):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RhRl_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_sat_hl_s1(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.l,Rt32.h):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RlRh_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_sat_lh_s0(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.l,Rt32.h):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RlRh_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_sat_lh_s1(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.l,Rt32.l):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RlRl_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_sat_ll_s0(rx, rs, rt) +} + +/// `Rx32+=mpy(Rs32.l,Rt32.l):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RlRl_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_acc_sat_ll_s1(rx, rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRh(rs: i32, rt: i32) -> i32 { + 
hexagon_M2_mpy_hh_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRh_s1(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_hh_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRl(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_hl_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRl_s1(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_hl_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRh(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_lh_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRh_s1(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_lh_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRl(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_ll_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRl_s1(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_ll_s1(rs, rt) +} + +/// `Rx32-=mpy(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RhRh(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_hh_s0(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RhRh_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_hh_s1(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RhRl(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_hl_s0(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RhRl_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_hl_s1(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RlRh(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_lh_s0(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
Q6_R_mpynac_RlRh_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_lh_s1(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RlRl(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_ll_s0(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RlRl_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_ll_s1(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.h,Rt32.h):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RhRh_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_sat_hh_s0(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.h,Rt32.h):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RhRh_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_sat_hh_s1(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.h,Rt32.l):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RhRl_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_sat_hl_s0(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.h,Rt32.l):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
Q6_R_mpynac_RhRl_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_sat_hl_s1(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.l,Rt32.h):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RlRh_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_sat_lh_s0(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.l,Rt32.h):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RlRh_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_sat_lh_s1(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.l,Rt32.l):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RlRl_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_sat_ll_s0(rx, rs, rt) +} + +/// `Rx32-=mpy(Rs32.l,Rt32.l):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RlRl_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_nac_sat_ll_s1(rx, rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.h):rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRh_rnd(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_rnd_hh_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
Q6_R_mpy_RhRh_s1_rnd(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_rnd_hh_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.l):rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRl_rnd(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_rnd_hl_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRl_s1_rnd(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_rnd_hl_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.h):rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRh_rnd(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_rnd_lh_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRh_s1_rnd(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_rnd_lh_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.l):rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRl_rnd(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_rnd_ll_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRl_s1_rnd(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_rnd_ll_s1(rs, rt) +} + +/// 
`Rd32=mpy(Rs32.h,Rt32.h):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRh_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_hh_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.h):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRh_s1_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_hh_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.l):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRl_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_hl_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.l):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRl_s1_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_hl_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.h):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRh_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_lh_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.h):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRh_s1_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_lh_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.l):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRl_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_ll_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.l):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRl_s1_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_ll_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.h):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRh_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_rnd_hh_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRh_s1_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_rnd_hh_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.l):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRl_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_rnd_hl_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RhRl_s1_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_rnd_hl_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.h):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRh_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_rnd_lh_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRh_s1_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_rnd_lh_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.l):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRl_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_rnd_ll_s0(rs, rt) +} + +/// `Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RlRl_s1_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_sat_rnd_ll_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RR(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_up(rs, rt) +} + +/// `Rd32=mpy(Rs32,Rt32):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RR_s1(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpy_up_s1(rs, rt) +} + +/// `Rd32=mpy(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpy_RR_s1_sat(rs: i32, rt: i32) -> i32 { + 
hexagon_M2_mpy_up_s1_sat(rs, rt) +} + +/// `Rxx32+=mpy(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyacc_RhRh(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_acc_hh_s0(rxx, rs, rt) +} + +/// `Rxx32+=mpy(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyacc_RhRh_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_acc_hh_s1(rxx, rs, rt) +} + +/// `Rxx32+=mpy(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyacc_RhRl(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_acc_hl_s0(rxx, rs, rt) +} + +/// `Rxx32+=mpy(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyacc_RhRl_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_acc_hl_s1(rxx, rs, rt) +} + +/// `Rxx32+=mpy(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyacc_RlRh(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_acc_lh_s0(rxx, rs, rt) +} + +/// `Rxx32+=mpy(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyacc_RlRh_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_acc_lh_s1(rxx, rs, rt) 
+} + +/// `Rxx32+=mpy(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyacc_RlRl(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_acc_ll_s0(rxx, rs, rt) +} + +/// `Rxx32+=mpy(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyacc_RlRl_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_acc_ll_s1(rxx, rs, rt) +} + +/// `Rdd32=mpy(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RhRh(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_hh_s0(rs, rt) +} + +/// `Rdd32=mpy(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RhRh_s1(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_hh_s1(rs, rt) +} + +/// `Rdd32=mpy(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RhRl(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_hl_s0(rs, rt) +} + +/// `Rdd32=mpy(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RhRl_s1(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_hl_s1(rs, rt) +} + +/// `Rdd32=mpy(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RlRh(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_lh_s0(rs, rt) +} + +/// `Rdd32=mpy(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RlRh_s1(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_lh_s1(rs, rt) +} + +/// `Rdd32=mpy(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RlRl(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_ll_s0(rs, rt) +} + +/// `Rdd32=mpy(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RlRl_s1(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_ll_s1(rs, rt) +} + +/// `Rxx32-=mpy(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpynac_RhRh(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_nac_hh_s0(rxx, rs, rt) +} + +/// `Rxx32-=mpy(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpynac_RhRh_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_nac_hh_s1(rxx, rs, rt) +} + +/// `Rxx32-=mpy(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
Q6_P_mpynac_RhRl(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_nac_hl_s0(rxx, rs, rt) +} + +/// `Rxx32-=mpy(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpynac_RhRl_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_nac_hl_s1(rxx, rs, rt) +} + +/// `Rxx32-=mpy(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpynac_RlRh(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_nac_lh_s0(rxx, rs, rt) +} + +/// `Rxx32-=mpy(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpynac_RlRh_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_nac_lh_s1(rxx, rs, rt) +} + +/// `Rxx32-=mpy(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpynac_RlRl(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_nac_ll_s0(rxx, rs, rt) +} + +/// `Rxx32-=mpy(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpynac_RlRl_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_nac_ll_s1(rxx, rs, rt) +} + +/// `Rdd32=mpy(Rs32.h,Rt32.h):rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RhRh_rnd(rs: i32, rt: 
i32) -> i64 { + hexagon_M2_mpyd_rnd_hh_s0(rs, rt) +} + +/// `Rdd32=mpy(Rs32.h,Rt32.h):<<1:rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RhRh_s1_rnd(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_rnd_hh_s1(rs, rt) +} + +/// `Rdd32=mpy(Rs32.h,Rt32.l):rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RhRl_rnd(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_rnd_hl_s0(rs, rt) +} + +/// `Rdd32=mpy(Rs32.h,Rt32.l):<<1:rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RhRl_s1_rnd(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_rnd_hl_s1(rs, rt) +} + +/// `Rdd32=mpy(Rs32.l,Rt32.h):rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RlRh_rnd(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_rnd_lh_s0(rs, rt) +} + +/// `Rdd32=mpy(Rs32.l,Rt32.h):<<1:rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RlRh_s1_rnd(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_rnd_lh_s1(rs, rt) +} + +/// `Rdd32=mpy(Rs32.l,Rt32.l):rnd` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RlRl_rnd(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_rnd_ll_s0(rs, rt) +} + +/// `Rdd32=mpy(Rs32.l,Rt32.l):<<1:rnd` +/// 
+/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpy_RlRl_s1_rnd(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyd_rnd_ll_s1(rs, rt) +} + +/// `Rd32=mpyi(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyi_RR(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyi(rs, rt) +} + +/// `Rd32=mpyi(Rs32,#m9)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyi_RI(rs: i32, im9: i32) -> i32 { + hexagon_M2_mpysmi(rs, im9) +} + +/// `Rd32=mpysu(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpysu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpysu_RR(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpysu_up(rs, rt) +} + +/// `Rx32+=mpyu(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyuacc_RhRh(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_acc_hh_s0(rx, rs, rt) +} + +/// `Rx32+=mpyu(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyuacc_RhRh_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_acc_hh_s1(rx, rs, rt) +} + +/// `Rx32+=mpyu(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyuacc_RhRl(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_acc_hl_s0(rx, rs, rt) +} + +/// `Rx32+=mpyu(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyuacc_RhRl_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_acc_hl_s1(rx, rs, rt) +} + +/// `Rx32+=mpyu(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyuacc_RlRh(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_acc_lh_s0(rx, rs, rt) +} + +/// `Rx32+=mpyu(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyuacc_RlRh_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_acc_lh_s1(rx, rs, rt) +} + +/// `Rx32+=mpyu(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyuacc_RlRl(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_acc_ll_s0(rx, rs, rt) +} + +/// `Rx32+=mpyu(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyuacc_RlRl_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_acc_ll_s1(rx, rs, rt) +} + +/// `Rd32=mpyu(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn Q6_R_mpyu_RhRh(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_hh_s0(rs, rt) +} + +/// `Rd32=mpyu(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyu_RhRh_s1(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_hh_s1(rs, rt) +} + +/// `Rd32=mpyu(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyu_RhRl(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_hl_s0(rs, rt) +} + +/// `Rd32=mpyu(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyu_RhRl_s1(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_hl_s1(rs, rt) +} + +/// `Rd32=mpyu(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyu_RlRh(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_lh_s0(rs, rt) +} + +/// `Rd32=mpyu(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyu_RlRh_s1(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_lh_s1(rs, rt) +} + +/// `Rd32=mpyu(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyu_RlRl(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_ll_s0(rs, rt) +} + +/// `Rd32=mpyu(Rs32.l,Rt32.l):<<1` +/// +/// 
Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyu_RlRl_s1(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_ll_s1(rs, rt) +} + +/// `Rx32-=mpyu(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyunac_RhRh(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_nac_hh_s0(rx, rs, rt) +} + +/// `Rx32-=mpyu(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyunac_RhRh_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_nac_hh_s1(rx, rs, rt) +} + +/// `Rx32-=mpyu(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyunac_RhRl(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_nac_hl_s0(rx, rs, rt) +} + +/// `Rx32-=mpyu(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyunac_RhRl_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_nac_hl_s1(rx, rs, rt) +} + +/// `Rx32-=mpyu(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyunac_RlRh(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_nac_lh_s0(rx, rs, rt) +} + +/// `Rx32-=mpyu(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyunac_RlRh_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_nac_lh_s1(rx, rs, rt) +} + +/// `Rx32-=mpyu(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyunac_RlRl(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_nac_ll_s0(rx, rs, rt) +} + +/// `Rx32-=mpyu(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyunac_RlRl_s1(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_nac_ll_s1(rx, rs, rt) +} + +/// `Rd32=mpyu(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyu_RR(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyu_up(rs, rt) +} + +/// `Rxx32+=mpyu(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyuacc_RhRh(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_acc_hh_s0(rxx, rs, rt) +} + +/// `Rxx32+=mpyu(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyuacc_RhRh_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_acc_hh_s1(rxx, rs, rt) +} + +/// `Rxx32+=mpyu(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyuacc_RhRl(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_acc_hl_s0(rxx, rs, rt) +} + +/// `Rxx32+=mpyu(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyuacc_RhRl_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_acc_hl_s1(rxx, rs, rt) +} + +/// `Rxx32+=mpyu(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyuacc_RlRh(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_acc_lh_s0(rxx, rs, rt) +} + +/// `Rxx32+=mpyu(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyuacc_RlRh_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_acc_lh_s1(rxx, rs, rt) +} + +/// `Rxx32+=mpyu(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyuacc_RlRl(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_acc_ll_s0(rxx, rs, rt) +} + +/// `Rxx32+=mpyu(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyuacc_RlRl_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_acc_ll_s1(rxx, rs, rt) +} + +/// `Rdd32=mpyu(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyu_RhRh(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_hh_s0(rs, rt) +} + +/// `Rdd32=mpyu(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyu_RhRh_s1(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_hh_s1(rs, rt) +} + +/// `Rdd32=mpyu(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyu_RhRl(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_hl_s0(rs, rt) +} + +/// `Rdd32=mpyu(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyu_RhRl_s1(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_hl_s1(rs, rt) +} + +/// `Rdd32=mpyu(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyu_RlRh(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_lh_s0(rs, rt) +} + +/// `Rdd32=mpyu(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyu_RlRh_s1(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_lh_s1(rs, rt) +} + +/// `Rdd32=mpyu(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyu_RlRl(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_ll_s0(rs, 
rt) +} + +/// `Rdd32=mpyu(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyu_RlRl_s1(rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_ll_s1(rs, rt) +} + +/// `Rxx32-=mpyu(Rs32.h,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyunac_RhRh(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_nac_hh_s0(rxx, rs, rt) +} + +/// `Rxx32-=mpyu(Rs32.h,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyunac_RhRh_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_nac_hh_s1(rxx, rs, rt) +} + +/// `Rxx32-=mpyu(Rs32.h,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyunac_RhRl(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_nac_hl_s0(rxx, rs, rt) +} + +/// `Rxx32-=mpyu(Rs32.h,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyunac_RhRl_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_nac_hl_s1(rxx, rs, rt) +} + +/// `Rxx32-=mpyu(Rs32.l,Rt32.h)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyunac_RlRh(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_nac_lh_s0(rxx, rs, rt) +} + +/// 
`Rxx32-=mpyu(Rs32.l,Rt32.h):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyunac_RlRh_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_nac_lh_s1(rxx, rs, rt) +} + +/// `Rxx32-=mpyu(Rs32.l,Rt32.l)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyunac_RlRl(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_nac_ll_s0(rxx, rs, rt) +} + +/// `Rxx32-=mpyu(Rs32.l,Rt32.l):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_mpyunac_RlRl_s1(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_mpyud_nac_ll_s1(rxx, rs, rt) +} + +/// `Rd32=mpyui(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpyui))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyui_RR(rs: i32, rt: i32) -> i32 { + hexagon_M2_mpyui(rs, rt) +} + +/// `Rx32-=add(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_addnac_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_nacci(rx, rs, rt) +} + +/// `Rx32-=add(Rs32,#s8)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(add, IS8 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_addnac_RI(rx: i32, rs: i32) -> i32 { + static_assert_simm_bits!(IS8, 8); + hexagon_M2_naccii(rx, rs, IS8) +} + +/// 
`Rx32+=sub(Rt32,Rs32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(sub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_subacc_RR(rx: i32, rt: i32, rs: i32) -> i32 { + hexagon_M2_subacc(rx, rt, rs) +} + +/// `Rdd32=vabsdiffh(Rtt32,Rss32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vabsdiffh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vabsdiffh_PP(rtt: i64, rss: i64) -> i64 { + hexagon_M2_vabsdiffh(rtt, rss) +} + +/// `Rdd32=vabsdiffw(Rtt32,Rss32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vabsdiffw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vabsdiffw_PP(rtt: i64, rss: i64) -> i64 { + hexagon_M2_vabsdiffw(rtt, rss) +} + +/// `Rxx32+=vcmpyi(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vcmpyiacc_PP_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vcmac_s0_sat_i(rxx, rss, rtt) +} + +/// `Rxx32+=vcmpyr(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpyr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vcmpyracc_PP_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vcmac_s0_sat_r(rxx, rss, rtt) +} + +/// `Rdd32=vcmpyi(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vcmpyi_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vcmpy_s0_sat_i(rss, rtt) +} + +/// `Rdd32=vcmpyr(Rss32,Rtt32):sat` +/// +/// 
Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpyr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vcmpyr_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vcmpy_s0_sat_r(rss, rtt) +} + +/// `Rdd32=vcmpyi(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vcmpyi_PP_s1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vcmpy_s1_sat_i(rss, rtt) +} + +/// `Rdd32=vcmpyr(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcmpyr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vcmpyr_PP_s1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vcmpy_s1_sat_r(rss, rtt) +} + +/// `Rxx32+=vdmpy(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vdmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vdmpyacc_PP_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vdmacs_s0(rxx, rss, rtt) +} + +/// `Rxx32+=vdmpy(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vdmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vdmpyacc_PP_s1_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vdmacs_s1(rxx, rss, rtt) +} + +/// `Rd32=vdmpy(Rss32,Rtt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vdmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vdmpy_PP_rnd_sat(rss: i64, rtt: i64) -> i32 { + hexagon_M2_vdmpyrs_s0(rss, rtt) +} + +/// `Rd32=vdmpy(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: 
M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vdmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vdmpy_PP_s1_rnd_sat(rss: i64, rtt: i64) -> i32 { + hexagon_M2_vdmpyrs_s1(rss, rtt) +} + +/// `Rdd32=vdmpy(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vdmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vdmpy_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vdmpys_s0(rss, rtt) +} + +/// `Rdd32=vdmpy(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vdmpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vdmpy_PP_s1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vdmpys_s1(rss, rtt) +} + +/// `Rxx32+=vmpyh(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyhacc_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_vmac2(rxx, rs, rt) +} + +/// `Rxx32+=vmpyeh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyehacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vmac2es(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyeh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyehacc_PP_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vmac2es_s0(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyeh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(vmpyeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyehacc_PP_s1_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vmac2es_s1(rxx, rss, rtt) +} + +/// `Rxx32+=vmpyh(Rs32,Rt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyhacc_RR_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_vmac2s_s0(rxx, rs, rt) +} + +/// `Rxx32+=vmpyh(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyhacc_RR_s1_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_vmac2s_s1(rxx, rs, rt) +} + +/// `Rxx32+=vmpyhsu(Rs32,Rt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyhsu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyhsuacc_RR_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_vmac2su_s0(rxx, rs, rt) +} + +/// `Rxx32+=vmpyhsu(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyhsu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyhsuacc_RR_s1_sat(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M2_vmac2su_s1(rxx, rs, rt) +} + +/// `Rdd32=vmpyeh(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyeh_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vmpy2es_s0(rss, rtt) +} + +/// `Rdd32=vmpyeh(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(vmpyeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyeh_PP_s1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vmpy2es_s1(rss, rtt) +} + +/// `Rdd32=vmpyh(Rs32,Rt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyh_RR_sat(rs: i32, rt: i32) -> i64 { + hexagon_M2_vmpy2s_s0(rs, rt) +} + +/// `Rd32=vmpyh(Rs32,Rt32):rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vmpyh_RR_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_vmpy2s_s0pack(rs, rt) +} + +/// `Rdd32=vmpyh(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyh_RR_s1_sat(rs: i32, rt: i32) -> i64 { + hexagon_M2_vmpy2s_s1(rs, rt) +} + +/// `Rd32=vmpyh(Rs32,Rt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vmpyh_RR_s1_rnd_sat(rs: i32, rt: i32) -> i32 { + hexagon_M2_vmpy2s_s1pack(rs, rt) +} + +/// `Rdd32=vmpyhsu(Rs32,Rt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyhsu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpyhsu_RR_sat(rs: i32, rt: i32) -> i64 { + hexagon_M2_vmpy2su_s0(rs, rt) +} + +/// `Rdd32=vmpyhsu(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpyhsu))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn Q6_P_vmpyhsu_RR_s1_sat(rs: i32, rt: i32) -> i64 { + hexagon_M2_vmpy2su_s1(rs, rt) +} + +/// `Rd32=vraddh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vraddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vraddh_PP(rss: i64, rtt: i64) -> i32 { + hexagon_M2_vraddh(rss, rtt) +} + +/// `Rd32=vradduh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vradduh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vradduh_PP(rss: i64, rtt: i64) -> i32 { + hexagon_M2_vradduh(rss, rtt) +} + +/// `Rxx32+=vrcmpyi(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpyiacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vrcmaci_s0(rxx, rss, rtt) +} + +/// `Rxx32+=vrcmpyi(Rss32,Rtt32*)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpyiacc_PP_conj(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vrcmaci_s0c(rxx, rss, rtt) +} + +/// `Rxx32+=vrcmpyr(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpyr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpyracc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vrcmacr_s0(rxx, rss, rtt) +} + +/// `Rxx32+=vrcmpyr(Rss32,Rtt32*)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpyr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpyracc_PP_conj(rxx: i64, rss: 
i64, rtt: i64) -> i64 { + hexagon_M2_vrcmacr_s0c(rxx, rss, rtt) +} + +/// `Rdd32=vrcmpyi(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpyi_PP(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vrcmpyi_s0(rss, rtt) +} + +/// `Rdd32=vrcmpyi(Rss32,Rtt32*)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpyi_PP_conj(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vrcmpyi_s0c(rss, rtt) +} + +/// `Rdd32=vrcmpyr(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpyr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpyr_PP(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vrcmpyr_s0(rss, rtt) +} + +/// `Rdd32=vrcmpyr(Rss32,Rtt32*)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpyr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpyr_PP_conj(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vrcmpyr_s0c(rss, rtt) +} + +/// `Rxx32+=vrcmpys(Rss32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpys))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpysacc_PR_s1_sat(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_M2_vrcmpys_acc_s1(rxx, rss, rt) +} + +/// `Rdd32=vrcmpys(Rss32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpys))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcmpys_PR_s1_sat(rss: i64, rt: i32) -> i64 { + 
hexagon_M2_vrcmpys_s1(rss, rt) +} + +/// `Rd32=vrcmpys(Rss32,Rt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcmpys))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vrcmpys_PR_s1_rnd_sat(rss: i64, rt: i32) -> i32 { + hexagon_M2_vrcmpys_s1rp(rss, rt) +} + +/// `Rxx32+=vrmpyh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpyhacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M2_vrmac_s0(rxx, rss, rtt) +} + +/// `Rdd32=vrmpyh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpyh_PP(rss: i64, rtt: i64) -> i64 { + hexagon_M2_vrmpy_s0(rss, rtt) +} + +/// `Rx32^=xor(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(xor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_xorxacc_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_xor_xacc(rx, rs, rt) +} + +/// `Rx32&=and(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_andand_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_and_and(rx, rs, rt) +} + +/// `Rx32&=and(Rs32,~Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_andand_RnR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_and_andn(rx, rs, rt) +} + +/// `Rx32&=or(Rs32,Rt32)` +/// +/// Instruction Type: M +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_orand_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_and_or(rx, rs, rt) +} + +/// `Rx32&=xor(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(xor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_xorand_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_and_xor(rx, rs, rt) +} + +/// `Rd32=cmpyiwh(Rss32,Rt32):<<1:rnd:sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpyiwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyiwh_PR_s1_rnd_sat(rss: i64, rt: i32) -> i32 { + hexagon_M4_cmpyi_wh(rss, rt) +} + +/// `Rd32=cmpyiwh(Rss32,Rt32*):<<1:rnd:sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpyiwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyiwh_PR_conj_s1_rnd_sat(rss: i64, rt: i32) -> i32 { + hexagon_M4_cmpyi_whc(rss, rt) +} + +/// `Rd32=cmpyrwh(Rss32,Rt32):<<1:rnd:sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpyrwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyrwh_PR_s1_rnd_sat(rss: i64, rt: i32) -> i32 { + hexagon_M4_cmpyr_wh(rss, rt) +} + +/// `Rd32=cmpyrwh(Rss32,Rt32*):<<1:rnd:sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cmpyrwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyrwh_PR_conj_s1_rnd_sat(rss: i64, rt: i32) -> i32 { + hexagon_M4_cmpyr_whc(rss, rt) +} + +/// `Rx32+=mpy(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyacc_RR_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_mac_up_s1_sat(rx, rs, rt) +} + +/// `Rd32=add(#u6,mpyi(Rs32,#U6))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0, 2)] +#[cfg_attr(test, assert_instr(add, IU6 = 0, IU6_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_mpyi_IRI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU6, 6); + static_assert_uimm_bits!(IU6_2, 6); + hexagon_M4_mpyri_addi(IU6 as i32, rs, IU6_2 as i32) +} + +/// `Rd32=add(Ru32,mpyi(Rs32,#u6))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(add, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_mpyi_RRI(ru: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU6, 6); + hexagon_M4_mpyri_addr(ru, rs, IU6 as i32) +} + +/// `Rd32=add(Ru32,mpyi(#u6:2,Rs32))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(add, IU6_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_mpyi_RIR(ru: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU6_2, 6); + hexagon_M4_mpyri_addr_u2(ru, IU6_2 as i32, rs) +} + +/// `Rd32=add(#u6,mpyi(Rs32,Rt32))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[cfg_attr(test, assert_instr(add, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_mpyi_IRR(rs: i32, rt: i32) -> i32 { + static_assert_uimm_bits!(IU6, 6); + hexagon_M4_mpyrr_addi(IU6 as i32, rs, rt) +} + +/// `Ry32=add(Ru32,mpyi(Ry32,Rs32))` +/// +/// Instruction Type: M +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_mpyi_RRR(ru: i32, ry: i32, rs: i32) -> i32 { + hexagon_M4_mpyrr_addr(ru, ry, rs) +} + +/// `Rx32-=mpy(Rs32,Rt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(mpy))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpynac_RR_s1_sat(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_nac_up_s1_sat(rx, rs, rt) +} + +/// `Rx32|=and(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_andor_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_or_and(rx, rs, rt) +} + +/// `Rx32|=and(Rs32,~Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_andor_RnR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_or_andn(rx, rs, rt) +} + +/// `Rx32|=or(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_oror_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_or_or(rx, rs, rt) +} + +/// `Rx32|=xor(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(xor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_xoror_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_or_xor(rx, rs, rt) +} + +/// `Rdd32=pmpyw(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(pmpyw))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn Q6_P_pmpyw_RR(rs: i32, rt: i32) -> i64 { + hexagon_M4_pmpyw(rs, rt) +} + +/// `Rxx32^=pmpyw(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(pmpyw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_pmpywxacc_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M4_pmpyw_acc(rxx, rs, rt) +} + +/// `Rdd32=vpmpyh(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vpmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vpmpyh_RR(rs: i32, rt: i32) -> i64 { + hexagon_M4_vpmpyh(rs, rt) +} + +/// `Rxx32^=vpmpyh(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vpmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vpmpyhxacc_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M4_vpmpyh_acc(rxx, rs, rt) +} + +/// `Rxx32+=vrmpyweh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpywehacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M4_vrmpyeh_acc_s0(rxx, rss, rtt) +} + +/// `Rxx32+=vrmpyweh(Rss32,Rtt32):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpywehacc_PP_s1(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M4_vrmpyeh_acc_s1(rxx, rss, rtt) +} + +/// `Rdd32=vrmpyweh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpyweh_PP(rss: i64, rtt: i64) -> 
i64 { + hexagon_M4_vrmpyeh_s0(rss, rtt) +} + +/// `Rdd32=vrmpyweh(Rss32,Rtt32):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpyweh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpyweh_PP_s1(rss: i64, rtt: i64) -> i64 { + hexagon_M4_vrmpyeh_s1(rss, rtt) +} + +/// `Rxx32+=vrmpywoh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpywohacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M4_vrmpyoh_acc_s0(rxx, rss, rtt) +} + +/// `Rxx32+=vrmpywoh(Rss32,Rtt32):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpywohacc_PP_s1(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M4_vrmpyoh_acc_s1(rxx, rss, rtt) +} + +/// `Rdd32=vrmpywoh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpywoh_PP(rss: i64, rtt: i64) -> i64 { + hexagon_M4_vrmpyoh_s0(rss, rtt) +} + +/// `Rdd32=vrmpywoh(Rss32,Rtt32):<<1` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpywoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpywoh_PP_s1(rss: i64, rtt: i64) -> i64 { + hexagon_M4_vrmpyoh_s1(rss, rtt) +} + +/// `Rx32^=and(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_andxacc_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_xor_and(rx, rs, rt) +} 
+ +/// `Rx32^=and(Rs32,~Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(and))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_andxacc_RnR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_xor_andn(rx, rs, rt) +} + +/// `Rx32^=or(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(or))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_orxacc_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M4_xor_or(rx, rs, rt) +} + +/// `Rxx32^=xor(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(xor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_xorxacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M4_xor_xacc(rxx, rss, rtt) +} + +/// `Rxx32+=vdmpybsu(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vdmpybsu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vdmpybsuacc_PP_sat(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M5_vdmacbsu(rxx, rss, rtt) +} + +/// `Rdd32=vdmpybsu(Rss32,Rtt32):sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vdmpybsu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vdmpybsu_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_M5_vdmpybsu(rss, rtt) +} + +/// `Rxx32+=vmpybsu(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpybsu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpybsuacc_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M5_vmacbsu(rxx, rs, rt) +} + +/// `Rxx32+=vmpybu(Rs32,Rt32)` +/// +/// Instruction Type: M +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpybu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpybuacc_RR(rxx: i64, rs: i32, rt: i32) -> i64 { + hexagon_M5_vmacbuu(rxx, rs, rt) +} + +/// `Rdd32=vmpybsu(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpybsu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpybsu_RR(rs: i32, rt: i32) -> i64 { + hexagon_M5_vmpybsu(rs, rt) +} + +/// `Rdd32=vmpybu(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vmpybu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vmpybu_RR(rs: i32, rt: i32) -> i64 { + hexagon_M5_vmpybuu(rs, rt) +} + +/// `Rxx32+=vrmpybsu(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpybsu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpybsuacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M5_vrmacbsu(rxx, rss, rtt) +} + +/// `Rxx32+=vrmpybu(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpybu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpybuacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M5_vrmacbuu(rxx, rss, rtt) +} + +/// `Rdd32=vrmpybsu(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpybsu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpybsu_PP(rss: i64, rtt: i64) -> i64 { + hexagon_M5_vrmpybsu(rss, rtt) +} + +/// `Rdd32=vrmpybu(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrmpybu))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrmpybu_PP(rss: i64, rtt: i64) -> i64 { + hexagon_M5_vrmpybuu(rss, rtt) +} + +/// `Rd32=addasl(Rt32,Rs32,#u3)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(addasl, IU3 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_addasl_RRI(rt: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU3, 3); + hexagon_S2_addasl_rrri(rt, rs, IU3 as i32) +} + +/// `Rdd32=asl(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(asl, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asl_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asl_i_p(rss, IU6 as i32) +} + +/// `Rxx32+=asl(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_aslacc_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asl_i_p_acc(rxx, rss, IU6 as i32) +} + +/// `Rxx32&=asl(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asland_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asl_i_p_and(rxx, rss, IU6 as i32) +} + +/// `Rxx32-=asl(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub 
unsafe fn Q6_P_aslnac_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asl_i_p_nac(rxx, rss, IU6 as i32) +} + +/// `Rxx32|=asl(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_aslor_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asl_i_p_or(rxx, rss, IU6 as i32) +} + +/// `Rxx32^=asl(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_aslxacc_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asl_i_p_xacc(rxx, rss, IU6 as i32) +} + +/// `Rd32=asl(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(asl, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asl_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asl_i_r(rs, IU5 as i32) +} + +/// `Rx32+=asl(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_aslacc_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asl_i_r_acc(rx, rs, IU5 as i32) +} + +/// `Rx32&=asl(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asland_RI(rx: i32, 
rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asl_i_r_and(rx, rs, IU5 as i32) +} + +/// `Rx32-=asl(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_aslnac_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asl_i_r_nac(rx, rs, IU5 as i32) +} + +/// `Rx32|=asl(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_aslor_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asl_i_r_or(rx, rs, IU5 as i32) +} + +/// `Rd32=asl(Rs32,#u5):sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(asl, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asl_RI_sat(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asl_i_r_sat(rs, IU5 as i32) +} + +/// `Rx32^=asl(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asl, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_aslxacc_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asl_i_r_xacc(rx, rs, IU5 as i32) +} + +/// `Rdd32=vaslh(Rss32,#u4)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vaslh, IU4 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaslh_PI(rss: i64) -> i64 { + 
static_assert_uimm_bits!(IU4, 4); + hexagon_S2_asl_i_vh(rss, IU4 as i32) +} + +/// `Rdd32=vaslw(Rss32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vaslw, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaslw_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asl_i_vw(rss, IU5 as i32) +} + +/// `Rdd32=asl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asl_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_asl_r_p(rss, rt) +} + +/// `Rxx32+=asl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_aslacc_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_asl_r_p_acc(rxx, rss, rt) +} + +/// `Rxx32&=asl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asland_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_asl_r_p_and(rxx, rss, rt) +} + +/// `Rxx32-=asl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_aslnac_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_asl_r_p_nac(rxx, rss, rt) +} + +/// `Rxx32|=asl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_aslor_PR(rxx: i64, rss: i64, rt: i32) -> 
i64 { + hexagon_S2_asl_r_p_or(rxx, rss, rt) +} + +/// `Rxx32^=asl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_aslxacc_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_asl_r_p_xor(rxx, rss, rt) +} + +/// `Rd32=asl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asl_RR(rs: i32, rt: i32) -> i32 { + hexagon_S2_asl_r_r(rs, rt) +} + +/// `Rx32+=asl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_aslacc_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_asl_r_r_acc(rx, rs, rt) +} + +/// `Rx32&=asl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asland_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_asl_r_r_and(rx, rs, rt) +} + +/// `Rx32-=asl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_aslnac_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_asl_r_r_nac(rx, rs, rt) +} + +/// `Rx32|=asl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_aslor_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_asl_r_r_or(rx, rs, rt) +} + +/// `Rd32=asl(Rs32,Rt32):sat` +/// +/// Instruction Type: S_3op +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asl_RR_sat(rs: i32, rt: i32) -> i32 { + hexagon_S2_asl_r_r_sat(rs, rt) +} + +/// `Rdd32=vaslh(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaslh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaslh_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_asl_r_vh(rss, rt) +} + +/// `Rdd32=vaslw(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vaslw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vaslw_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_asl_r_vw(rss, rt) +} + +/// `Rdd32=asr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(asr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asr_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asr_i_p(rss, IU6 as i32) +} + +/// `Rxx32+=asr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asracc_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asr_i_p_acc(rxx, rss, IU6 as i32) +} + +/// `Rxx32&=asr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asrand_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + 
hexagon_S2_asr_i_p_and(rxx, rss, IU6 as i32) +} + +/// `Rxx32-=asr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asrnac_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asr_i_p_nac(rxx, rss, IU6 as i32) +} + +/// `Rxx32|=asr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asror_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asr_i_p_or(rxx, rss, IU6 as i32) +} + +/// `Rdd32=asr(Rss32,#u6):rnd` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(asr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asr_PI_rnd(rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asr_i_p_rnd(rss, IU6 as i32) +} + +/// `Rdd32=asrrnd(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(asrrnd, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asrrnd_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_asr_i_p_rnd_goodsyntax(rss, IU6 as i32) +} + +/// `Rd32=asr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(asr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asr_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asr_i_r(rs, IU5 
as i32) +} + +/// `Rx32+=asr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asracc_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asr_i_r_acc(rx, rs, IU5 as i32) +} + +/// `Rx32&=asr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asrand_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asr_i_r_and(rx, rs, IU5 as i32) +} + +/// `Rx32-=asr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asrnac_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asr_i_r_nac(rx, rs, IU5 as i32) +} + +/// `Rx32|=asr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(asr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asror_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asr_i_r_or(rx, rs, IU5 as i32) +} + +/// `Rd32=asr(Rs32,#u5):rnd` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(asr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asr_RI_rnd(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asr_i_r_rnd(rs, IU5 as i32) +} + +/// 
`Rd32=asrrnd(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(asrrnd, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asrrnd_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asr_i_r_rnd_goodsyntax(rs, IU5 as i32) +} + +/// `Rd32=vasrw(Rss32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vasrw, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vasrw_PI(rss: i64) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asr_i_svw_trun(rss, IU5 as i32) +} + +/// `Rdd32=vasrh(Rss32,#u4)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vasrh, IU4 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vasrh_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU4, 4); + hexagon_S2_asr_i_vh(rss, IU4 as i32) +} + +/// `Rdd32=vasrw(Rss32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vasrw, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vasrw_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_asr_i_vw(rss, IU5 as i32) +} + +/// `Rdd32=asr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asr_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_asr_r_p(rss, rt) +} + +/// `Rxx32+=asr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asracc_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_asr_r_p_acc(rxx, rss, rt) +} + +/// `Rxx32&=asr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asrand_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_asr_r_p_and(rxx, rss, rt) +} + +/// `Rxx32-=asr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asrnac_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_asr_r_p_nac(rxx, rss, rt) +} + +/// `Rxx32|=asr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asror_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_asr_r_p_or(rxx, rss, rt) +} + +/// `Rxx32^=asr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_asrxacc_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_asr_r_p_xor(rxx, rss, rt) +} + +/// `Rd32=asr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asr_RR(rs: i32, rt: i32) -> i32 { + hexagon_S2_asr_r_r(rs, rt) +} + +/// `Rx32+=asr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asracc_RR(rx: 
i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_asr_r_r_acc(rx, rs, rt) +} + +/// `Rx32&=asr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asrand_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_asr_r_r_and(rx, rs, rt) +} + +/// `Rx32-=asr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asrnac_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_asr_r_r_nac(rx, rs, rt) +} + +/// `Rx32|=asr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asror_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_asr_r_r_or(rx, rs, rt) +} + +/// `Rd32=asr(Rs32,Rt32):sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(asr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_asr_RR_sat(rs: i32, rt: i32) -> i32 { + hexagon_S2_asr_r_r_sat(rs, rt) +} + +/// `Rd32=vasrw(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vasrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vasrw_PR(rss: i64, rt: i32) -> i32 { + hexagon_S2_asr_r_svw_trun(rss, rt) +} + +/// `Rdd32=vasrh(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vasrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vasrh_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_asr_r_vh(rss, rt) +} + +/// `Rdd32=vasrw(Rss32,Rt32)` +/// +/// Instruction 
Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vasrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vasrw_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_asr_r_vw(rss, rt) +} + +/// `Rd32=brev(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(brev))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_brev_R(rs: i32) -> i32 { + hexagon_S2_brev(rs) +} + +/// `Rdd32=brev(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(brev))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_brev_P(rss: i64) -> i64 { + hexagon_S2_brevp(rss) +} + +/// `Rd32=cl0(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cl0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cl0_R(rs: i32) -> i32 { + hexagon_S2_cl0(rs) +} + +/// `Rd32=cl0(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cl0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cl0_P(rss: i64) -> i32 { + hexagon_S2_cl0p(rss) +} + +/// `Rd32=cl1(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cl1))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cl1_R(rs: i32) -> i32 { + hexagon_S2_cl1(rs) +} + +/// `Rd32=cl1(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(cl1))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cl1_P(rss: i64) -> i32 { + hexagon_S2_cl1p(rss) +} + +/// `Rd32=clb(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(test, assert_instr(clb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_clb_R(rs: i32) -> i32 { + hexagon_S2_clb(rs) +} + +/// `Rd32=normamt(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(normamt))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_normamt_R(rs: i32) -> i32 { + hexagon_S2_clbnorm(rs) +} + +/// `Rd32=clb(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(clb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_clb_P(rss: i64) -> i32 { + hexagon_S2_clbp(rss) +} + +/// `Rd32=clrbit(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(clrbit, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_clrbit_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_clrbit_i(rs, IU5 as i32) +} + +/// `Rd32=clrbit(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(clrbit))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_clrbit_RR(rs: i32, rt: i32) -> i32 { + hexagon_S2_clrbit_r(rs, rt) +} + +/// `Rd32=ct0(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(ct0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_ct0_R(rs: i32) -> i32 { + hexagon_S2_ct0(rs) +} + +/// `Rd32=ct0(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(ct0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_ct0_P(rss: i64) -> i32 { + hexagon_S2_ct0p(rss) +} + +/// 
`Rd32=ct1(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(ct1))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_ct1_R(rs: i32) -> i32 { + hexagon_S2_ct1(rs) +} + +/// `Rd32=ct1(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(ct1))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_ct1_P(rss: i64) -> i32 { + hexagon_S2_ct1p(rss) +} + +/// `Rdd32=deinterleave(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(deinterleave))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_deinterleave_P(rss: i64) -> i64 { + hexagon_S2_deinterleave(rss) +} + +/// `Rd32=extractu(Rs32,#u5,#U5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1, 2)] +#[cfg_attr(test, assert_instr(extractu, IU5 = 0, IU5_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_extractu_RII(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + static_assert_uimm_bits!(IU5_2, 5); + hexagon_S2_extractu(rs, IU5 as i32, IU5_2 as i32) +} + +/// `Rd32=extractu(Rs32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(extractu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_extractu_RP(rs: i32, rtt: i64) -> i32 { + hexagon_S2_extractu_rp(rs, rtt) +} + +/// `Rdd32=extractu(Rss32,#u6,#U6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1, 2)] +#[cfg_attr(test, assert_instr(extractu, IU6 = 0, IU6_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_extractu_PII(rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); 
+ static_assert_uimm_bits!(IU6_2, 6); + hexagon_S2_extractup(rss, IU6 as i32, IU6_2 as i32) +} + +/// `Rdd32=extractu(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(extractu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_extractu_PP(rss: i64, rtt: i64) -> i64 { + hexagon_S2_extractup_rp(rss, rtt) +} + +/// `Rx32=insert(Rs32,#u5,#U5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2, 3)] +#[cfg_attr(test, assert_instr(insert, IU5 = 0, IU5_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_insert_RII(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + static_assert_uimm_bits!(IU5_2, 5); + hexagon_S2_insert(rx, rs, IU5 as i32, IU5_2 as i32) +} + +/// `Rx32=insert(Rs32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(insert))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_insert_RP(rx: i32, rs: i32, rtt: i64) -> i32 { + hexagon_S2_insert_rp(rx, rs, rtt) +} + +/// `Rxx32=insert(Rss32,#u6,#U6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2, 3)] +#[cfg_attr(test, assert_instr(insert, IU6 = 0, IU6_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_insert_PII(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + static_assert_uimm_bits!(IU6_2, 6); + hexagon_S2_insertp(rxx, rss, IU6 as i32, IU6_2 as i32) +} + +/// `Rxx32=insert(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(insert))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_insert_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_S2_insertp_rp(rxx, rss, 
rtt) +} + +/// `Rdd32=interleave(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(interleave))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_interleave_P(rss: i64) -> i64 { + hexagon_S2_interleave(rss) +} + +/// `Rdd32=lfs(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lfs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lfs_PP(rss: i64, rtt: i64) -> i64 { + hexagon_S2_lfsp(rss, rtt) +} + +/// `Rdd32=lsl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsl_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_lsl_r_p(rss, rt) +} + +/// `Rxx32+=lsl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lslacc_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsl_r_p_acc(rxx, rss, rt) +} + +/// `Rxx32&=lsl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsland_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsl_r_p_and(rxx, rss, rt) +} + +/// `Rxx32-=lsl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lslnac_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsl_r_p_nac(rxx, rss, rt) +} + +/// `Rxx32|=lsl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lslor_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsl_r_p_or(rxx, rss, rt) +} + +/// `Rxx32^=lsl(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lslxacc_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsl_r_p_xor(rxx, rss, rt) +} + +/// `Rd32=lsl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsl_RR(rs: i32, rt: i32) -> i32 { + hexagon_S2_lsl_r_r(rs, rt) +} + +/// `Rx32+=lsl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lslacc_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_lsl_r_r_acc(rx, rs, rt) +} + +/// `Rx32&=lsl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsland_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_lsl_r_r_and(rx, rs, rt) +} + +/// `Rx32-=lsl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lslnac_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_lsl_r_r_nac(rx, rs, rt) +} + +/// `Rx32|=lsl(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
Q6_R_lslor_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_lsl_r_r_or(rx, rs, rt) +} + +/// `Rdd32=vlslh(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vlslh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vlslh_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_lsl_r_vh(rss, rt) +} + +/// `Rdd32=vlslw(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vlslw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vlslw_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_lsl_r_vw(rss, rt) +} + +/// `Rdd32=lsr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(lsr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsr_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_lsr_i_p(rss, IU6 as i32) +} + +/// `Rxx32+=lsr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(lsr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsracc_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_lsr_i_p_acc(rxx, rss, IU6 as i32) +} + +/// `Rxx32&=lsr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(lsr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsrand_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_lsr_i_p_and(rxx, rss, IU6 as i32) +} + +/// `Rxx32-=lsr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(lsr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsrnac_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_lsr_i_p_nac(rxx, rss, IU6 as i32) +} + +/// `Rxx32|=lsr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(lsr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsror_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_lsr_i_p_or(rxx, rss, IU6 as i32) +} + +/// `Rxx32^=lsr(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(lsr, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsrxacc_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S2_lsr_i_p_xacc(rxx, rss, IU6 as i32) +} + +/// `Rd32=lsr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(lsr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsr_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_lsr_i_r(rs, IU5 as i32) +} + +/// `Rx32+=lsr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(lsr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsracc_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_lsr_i_r_acc(rx, rs, IU5 as i32) +} + +/// `Rx32&=lsr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] 
+#[cfg_attr(test, assert_instr(lsr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsrand_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_lsr_i_r_and(rx, rs, IU5 as i32) +} + +/// `Rx32-=lsr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(lsr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsrnac_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_lsr_i_r_nac(rx, rs, IU5 as i32) +} + +/// `Rx32|=lsr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(lsr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsror_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_lsr_i_r_or(rx, rs, IU5 as i32) +} + +/// `Rx32^=lsr(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(lsr, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsrxacc_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_lsr_i_r_xacc(rx, rs, IU5 as i32) +} + +/// `Rdd32=vlsrh(Rss32,#u4)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vlsrh, IU4 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vlsrh_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU4, 4); + hexagon_S2_lsr_i_vh(rss, IU4 as i32) +} + +/// `Rdd32=vlsrw(Rss32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, 
assert_instr(vlsrw, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vlsrw_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_lsr_i_vw(rss, IU5 as i32) +} + +/// `Rdd32=lsr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsr_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_lsr_r_p(rss, rt) +} + +/// `Rxx32+=lsr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsracc_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsr_r_p_acc(rxx, rss, rt) +} + +/// `Rxx32&=lsr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsrand_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsr_r_p_and(rxx, rss, rt) +} + +/// `Rxx32-=lsr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsrnac_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsr_r_p_nac(rxx, rss, rt) +} + +/// `Rxx32|=lsr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_lsror_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsr_r_p_or(rxx, rss, rt) +} + +/// `Rxx32^=lsr(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", 
issue = "151523")] +pub unsafe fn Q6_P_lsrxacc_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_lsr_r_p_xor(rxx, rss, rt) +} + +/// `Rd32=lsr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsr_RR(rs: i32, rt: i32) -> i32 { + hexagon_S2_lsr_r_r(rs, rt) +} + +/// `Rx32+=lsr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsracc_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_lsr_r_r_acc(rx, rs, rt) +} + +/// `Rx32&=lsr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsrand_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_lsr_r_r_and(rx, rs, rt) +} + +/// `Rx32-=lsr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsrnac_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_lsr_r_r_nac(rx, rs, rt) +} + +/// `Rx32|=lsr(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(lsr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsror_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_S2_lsr_r_r_or(rx, rs, rt) +} + +/// `Rdd32=vlsrh(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vlsrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vlsrh_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_lsr_r_vh(rss, rt) +} + +/// 
`Rdd32=vlsrw(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vlsrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vlsrw_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_lsr_r_vw(rss, rt) +} + +/// `Rdd32=packhl(Rs32,Rt32)` +/// +/// Instruction Type: ALU32_3op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(packhl))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_packhl_RR(rs: i32, rt: i32) -> i64 { + hexagon_S2_packhl(rs, rt) +} + +/// `Rd32=parity(Rss32,Rtt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(parity))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_parity_PP(rss: i64, rtt: i64) -> i32 { + hexagon_S2_parityp(rss, rtt) +} + +/// `Rd32=setbit(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(setbit, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_setbit_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_setbit_i(rs, IU5 as i32) +} + +/// `Rd32=setbit(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(setbit))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_setbit_RR(rs: i32, rt: i32) -> i32 { + hexagon_S2_setbit_r(rs, rt) +} + +/// `Rdd32=shuffeb(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(shuffeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_shuffeb_PP(rss: i64, rtt: i64) -> i64 { + hexagon_S2_shuffeb(rss, rtt) +} + +/// `Rdd32=shuffeh(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(shuffeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_shuffeh_PP(rss: i64, rtt: i64) -> i64 { + hexagon_S2_shuffeh(rss, rtt) +} + +/// `Rdd32=shuffob(Rtt32,Rss32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(shuffob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_shuffob_PP(rtt: i64, rss: i64) -> i64 { + hexagon_S2_shuffob(rtt, rss) +} + +/// `Rdd32=shuffoh(Rtt32,Rss32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(shuffoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_shuffoh_PP(rtt: i64, rss: i64) -> i64 { + hexagon_S2_shuffoh(rtt, rss) +} + +/// `Rd32=vsathb(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsathb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vsathb_R(rs: i32) -> i32 { + hexagon_S2_svsathb(rs) +} + +/// `Rd32=vsathub(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsathub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vsathub_R(rs: i32) -> i32 { + hexagon_S2_svsathub(rs) +} + +/// `Rx32=tableidxb(Rs32,#u4,#U5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(2, 3)] +#[cfg_attr(test, assert_instr(tableidxb, IU4 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_tableidxb_RII(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU4, 4); + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_tableidxb_goodsyntax(rx, rs, IU4 as i32, IU5 as i32) +} + +/// `Rx32=tableidxd(Rs32,#u4,#U5)` +/// +/// Instruction Type: S_2op 
+/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(2, 3)] +#[cfg_attr(test, assert_instr(tableidxd, IU4 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_tableidxd_RII(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU4, 4); + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_tableidxd_goodsyntax(rx, rs, IU4 as i32, IU5 as i32) +} + +/// `Rx32=tableidxh(Rs32,#u4,#U5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(2, 3)] +#[cfg_attr(test, assert_instr(tableidxh, IU4 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_tableidxh_RII(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU4, 4); + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_tableidxh_goodsyntax(rx, rs, IU4 as i32, IU5 as i32) +} + +/// `Rx32=tableidxw(Rs32,#u4,#U5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(2, 3)] +#[cfg_attr(test, assert_instr(tableidxw, IU4 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_tableidxw_RII(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU4, 4); + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_tableidxw_goodsyntax(rx, rs, IU4 as i32, IU5 as i32) +} + +/// `Rd32=togglebit(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(togglebit, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_togglebit_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_togglebit_i(rs, IU5 as i32) +} + +/// `Rd32=togglebit(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(togglebit))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] 
+pub unsafe fn Q6_R_togglebit_RR(rs: i32, rt: i32) -> i32 { + hexagon_S2_togglebit_r(rs, rt) +} + +/// `Pd4=tstbit(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(tstbit, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_tstbit_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S2_tstbit_i(rs, IU5 as i32) +} + +/// `Pd4=tstbit(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(tstbit))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_tstbit_RR(rs: i32, rt: i32) -> i32 { + hexagon_S2_tstbit_r(rs, rt) +} + +/// `Rdd32=valignb(Rtt32,Rss32,#u3)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(valignb, IU3 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_valignb_PPI(rtt: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU3, 3); + hexagon_S2_valignib(rtt, rss, IU3 as i32) +} + +/// `Rdd32=valignb(Rtt32,Rss32,Pu4)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(valignb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_valignb_PPp(rtt: i64, rss: i64, pu: i32) -> i64 { + hexagon_S2_valignrb(rtt, rss, pu) +} + +/// `Rdd32=vcnegh(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcnegh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vcnegh_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_vcnegh(rss, rt) +} + +/// `Rdd32=vcrotate(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vcrotate))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vcrotate_PR(rss: i64, rt: i32) -> i64 { + hexagon_S2_vcrotate(rss, rt) +} + +/// `Rxx32+=vrcnegh(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrcnegh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcneghacc_PR(rxx: i64, rss: i64, rt: i32) -> i64 { + hexagon_S2_vrcnegh(rxx, rss, rt) +} + +/// `Rd32=vrndwh(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrndwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vrndwh_P(rss: i64) -> i32 { + hexagon_S2_vrndpackwh(rss) +} + +/// `Rd32=vrndwh(Rss32):sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vrndwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vrndwh_P_sat(rss: i64) -> i32 { + hexagon_S2_vrndpackwhs(rss) +} + +/// `Rd32=vsathb(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsathb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vsathb_P(rss: i64) -> i32 { + hexagon_S2_vsathb(rss) +} + +/// `Rdd32=vsathb(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsathb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsathb_P(rss: i64) -> i64 { + hexagon_S2_vsathb_nopack(rss) +} + +/// `Rd32=vsathub(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsathub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vsathub_P(rss: i64) -> i32 { + hexagon_S2_vsathub(rss) +} + +/// `Rdd32=vsathub(Rss32)` +/// +/// Instruction 
Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsathub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsathub_P(rss: i64) -> i64 { + hexagon_S2_vsathub_nopack(rss) +} + +/// `Rd32=vsatwh(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsatwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vsatwh_P(rss: i64) -> i32 { + hexagon_S2_vsatwh(rss) +} + +/// `Rdd32=vsatwh(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsatwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsatwh_P(rss: i64) -> i64 { + hexagon_S2_vsatwh_nopack(rss) +} + +/// `Rd32=vsatwuh(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsatwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vsatwuh_P(rss: i64) -> i32 { + hexagon_S2_vsatwuh(rss) +} + +/// `Rdd32=vsatwuh(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsatwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsatwuh_P(rss: i64) -> i64 { + hexagon_S2_vsatwuh_nopack(rss) +} + +/// `Rd32=vsplatb(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsplatb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vsplatb_R(rs: i32) -> i32 { + hexagon_S2_vsplatrb(rs) +} + +/// `Rdd32=vsplath(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsplath))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsplath_R(rs: i32) -> i64 { + hexagon_S2_vsplatrh(rs) +} + 
+/// `Rdd32=vspliceb(Rss32,Rtt32,#u3)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(vspliceb, IU3 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vspliceb_PPI(rss: i64, rtt: i64) -> i64 { + static_assert_uimm_bits!(IU3, 3); + hexagon_S2_vspliceib(rss, rtt, IU3 as i32) +} + +/// `Rdd32=vspliceb(Rss32,Rtt32,Pu4)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vspliceb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vspliceb_PPp(rss: i64, rtt: i64, pu: i32) -> i64 { + hexagon_S2_vsplicerb(rss, rtt, pu) +} + +/// `Rdd32=vsxtbh(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsxtbh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsxtbh_R(rs: i32) -> i64 { + hexagon_S2_vsxtbh(rs) +} + +/// `Rdd32=vsxthw(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vsxthw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsxthw_R(rs: i32) -> i64 { + hexagon_S2_vsxthw(rs) +} + +/// `Rd32=vtrunehb(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vtrunehb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vtrunehb_P(rss: i64) -> i32 { + hexagon_S2_vtrunehb(rss) +} + +/// `Rdd32=vtrunewh(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vtrunewh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vtrunewh_PP(rss: i64, rtt: i64) -> i64 { + hexagon_S2_vtrunewh(rss, rtt) +} + +/// `Rd32=vtrunohb(Rss32)` +/// +/// Instruction Type: S_2op 
+/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vtrunohb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vtrunohb_P(rss: i64) -> i32 { + hexagon_S2_vtrunohb(rss) +} + +/// `Rdd32=vtrunowh(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vtrunowh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vtrunowh_PP(rss: i64, rtt: i64) -> i64 { + hexagon_S2_vtrunowh(rss, rtt) +} + +/// `Rdd32=vzxtbh(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vzxtbh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vzxtbh_R(rs: i32) -> i64 { + hexagon_S2_vzxtbh(rs) +} + +/// `Rdd32=vzxthw(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vzxthw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vzxthw_R(rs: i32) -> i64 { + hexagon_S2_vzxthw(rs) +} + +/// `Rd32=add(Rs32,add(Ru32,#s6))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(add, IS6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_add_RRI(rs: i32, ru: i32) -> i32 { + static_assert_simm_bits!(IS6, 6); + hexagon_S4_addaddi(rs, ru, IS6) +} + +/// `Rx32=add(#u8,asl(Rx32,#U5))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0, 2)] +#[cfg_attr(test, assert_instr(add, IU8 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_asl_IRI(rx: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + static_assert_uimm_bits!(IU5, 5); + hexagon_S4_addi_asl_ri(IU8 as i32, rx, IU5 as i32) +} + +/// 
`Rx32=add(#u8,lsr(Rx32,#U5))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0, 2)] +#[cfg_attr(test, assert_instr(add, IU8 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_lsr_IRI(rx: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + static_assert_uimm_bits!(IU5, 5); + hexagon_S4_addi_lsr_ri(IU8 as i32, rx, IU5 as i32) +} + +/// `Rx32=and(#u8,asl(Rx32,#U5))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0, 2)] +#[cfg_attr(test, assert_instr(and, IU8 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_and_asl_IRI(rx: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + static_assert_uimm_bits!(IU5, 5); + hexagon_S4_andi_asl_ri(IU8 as i32, rx, IU5 as i32) +} + +/// `Rx32=and(#u8,lsr(Rx32,#U5))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0, 2)] +#[cfg_attr(test, assert_instr(and, IU8 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_and_lsr_IRI(rx: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + static_assert_uimm_bits!(IU5, 5); + hexagon_S4_andi_lsr_ri(IU8 as i32, rx, IU5 as i32) +} + +/// `Rd32=add(clb(Rs32),#s6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(add, IS6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_clb_RI(rs: i32) -> i32 { + static_assert_simm_bits!(IS6, 6); + hexagon_S4_clbaddi(rs, IS6) +} + +/// `Rd32=add(clb(Rss32),#s6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(add, IS6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
Q6_R_add_clb_PI(rss: i64) -> i32 { + static_assert_simm_bits!(IS6, 6); + hexagon_S4_clbpaddi(rss, IS6) +} + +/// `Rd32=normamt(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(normamt))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_normamt_P(rss: i64) -> i32 { + hexagon_S4_clbpnorm(rss) +} + +/// `Rd32=extract(Rs32,#u5,#U5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1, 2)] +#[cfg_attr(test, assert_instr(extract, IU5 = 0, IU5_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_extract_RII(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + static_assert_uimm_bits!(IU5_2, 5); + hexagon_S4_extract(rs, IU5 as i32, IU5_2 as i32) +} + +/// `Rd32=extract(Rs32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(extract))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_extract_RP(rs: i32, rtt: i64) -> i32 { + hexagon_S4_extract_rp(rs, rtt) +} + +/// `Rdd32=extract(Rss32,#u6,#U6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1, 2)] +#[cfg_attr(test, assert_instr(extract, IU6 = 0, IU6_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_extract_PII(rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + static_assert_uimm_bits!(IU6_2, 6); + hexagon_S4_extractp(rss, IU6 as i32, IU6_2 as i32) +} + +/// `Rdd32=extract(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(extract))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_extract_PP(rss: i64, rtt: i64) -> i64 { + hexagon_S4_extractp_rp(rss, rtt) +} + +/// `Rd32=lsl(#s6,Rt32)` +/// +/// Instruction 
Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0)] +#[cfg_attr(test, assert_instr(lsl, IS6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_lsl_IR(rt: i32) -> i32 { + static_assert_simm_bits!(IS6, 6); + hexagon_S4_lsli(IS6, rt) +} + +/// `Pd4=!tstbit(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_tstbit_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S4_ntstbit_i(rs, IU5 as i32) +} + +/// `Pd4=!tstbit(Rs32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_tstbit_RR(rs: i32, rt: i32) -> i32 { + hexagon_S4_ntstbit_r(rs, rt) +} + +/// `Rx32|=and(Rs32,#s10)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(and, IS10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_andor_RI(rx: i32, rs: i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_S4_or_andi(rx, rs, IS10) +} + +/// `Rx32=or(Ru32,and(Rx32,#s10))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(or, IS10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_or_and_RRI(ru: i32, rx: i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_S4_or_andix(ru, rx, IS10) +} + +/// `Rx32|=or(Rs32,#s10)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(or, IS10 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_oror_RI(rx: i32, rs: 
i32) -> i32 { + static_assert_simm_bits!(IS10, 10); + hexagon_S4_or_ori(rx, rs, IS10) +} + +/// `Rx32=or(#u8,asl(Rx32,#U5))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0, 2)] +#[cfg_attr(test, assert_instr(or, IU8 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_or_asl_IRI(rx: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + static_assert_uimm_bits!(IU5, 5); + hexagon_S4_ori_asl_ri(IU8 as i32, rx, IU5 as i32) +} + +/// `Rx32=or(#u8,lsr(Rx32,#U5))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0, 2)] +#[cfg_attr(test, assert_instr(or, IU8 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_or_lsr_IRI(rx: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + static_assert_uimm_bits!(IU5, 5); + hexagon_S4_ori_lsr_ri(IU8 as i32, rx, IU5 as i32) +} + +/// `Rd32=parity(Rs32,Rt32)` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(parity))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_parity_RR(rs: i32, rt: i32) -> i32 { + hexagon_S4_parity(rs, rt) +} + +/// `Rd32=add(Rs32,sub(#s6,Ru32))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(add, IS6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_add_sub_RIR(rs: i32, ru: i32) -> i32 { + static_assert_simm_bits!(IS6, 6); + hexagon_S4_subaddi(rs, IS6, ru) +} + +/// `Rx32=sub(#u8,asl(Rx32,#U5))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0, 2)] +#[cfg_attr(test, assert_instr(sub, IU8 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_asl_IRI(rx: i32) -> 
i32 { + static_assert_uimm_bits!(IU8, 8); + static_assert_uimm_bits!(IU5, 5); + hexagon_S4_subi_asl_ri(IU8 as i32, rx, IU5 as i32) +} + +/// `Rx32=sub(#u8,lsr(Rx32,#U5))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(0, 2)] +#[cfg_attr(test, assert_instr(sub, IU8 = 0, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_sub_lsr_IRI(rx: i32) -> i32 { + static_assert_uimm_bits!(IU8, 8); + static_assert_uimm_bits!(IU5, 5); + hexagon_S4_subi_lsr_ri(IU8 as i32, rx, IU5 as i32) +} + +/// `Rdd32=vrcrotate(Rss32,Rt32,#u2)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(vrcrotate, IU2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcrotate_PRI(rss: i64, rt: i32) -> i64 { + static_assert_uimm_bits!(IU2, 2); + hexagon_S4_vrcrotate(rss, rt, IU2 as i32) +} + +/// `Rxx32+=vrcrotate(Rss32,Rt32,#u2)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[rustc_legacy_const_generics(3)] +#[cfg_attr(test, assert_instr(vrcrotate, IU2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vrcrotateacc_PRI(rxx: i64, rss: i64, rt: i32) -> i64 { + static_assert_uimm_bits!(IU2, 2); + hexagon_S4_vrcrotate_acc(rxx, rss, rt, IU2 as i32) +} + +/// `Rdd32=vxaddsubh(Rss32,Rtt32):sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vxaddsubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vxaddsubh_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_S4_vxaddsubh(rss, rtt) +} + +/// `Rdd32=vxaddsubh(Rss32,Rtt32):rnd:>>1:sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vxaddsubh))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn Q6_P_vxaddsubh_PP_rnd_rs1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_S4_vxaddsubhr(rss, rtt) +} + +/// `Rdd32=vxaddsubw(Rss32,Rtt32):sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vxaddsubw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vxaddsubw_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_S4_vxaddsubw(rss, rtt) +} + +/// `Rdd32=vxsubaddh(Rss32,Rtt32):sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vxsubaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vxsubaddh_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_S4_vxsubaddh(rss, rtt) +} + +/// `Rdd32=vxsubaddh(Rss32,Rtt32):rnd:>>1:sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vxsubaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vxsubaddh_PP_rnd_rs1_sat(rss: i64, rtt: i64) -> i64 { + hexagon_S4_vxsubaddhr(rss, rtt) +} + +/// `Rdd32=vxsubaddw(Rss32,Rtt32):sat` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(vxsubaddw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vxsubaddw_PP_sat(rss: i64, rtt: i64) -> i64 { + hexagon_S4_vxsubaddw(rss, rtt) +} + +/// `Rd32=vasrhub(Rss32,#u4):rnd:sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vasrhub, IU4 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vasrhub_PI_rnd_sat(rss: i64) -> i32 { + static_assert_uimm_bits!(IU4, 4); + hexagon_S5_asrhub_rnd_sat_goodsyntax(rss, IU4 as i32) +} + +/// `Rd32=vasrhub(Rss32,#u4):sat` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 
+#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vasrhub, IU4 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_vasrhub_PI_sat(rss: i64) -> i32 { + static_assert_uimm_bits!(IU4, 4); + hexagon_S5_asrhub_sat(rss, IU4 as i32) +} + +/// `Rd32=popcount(Rss32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(test, assert_instr(popcount))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_popcount_P(rss: i64) -> i32 { + hexagon_S5_popcountp(rss) +} + +/// `Rdd32=vasrh(Rss32,#u4):rnd` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT0123 +#[inline(always)] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vasrh, IU4 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vasrh_PI_rnd(rss: i64) -> i64 { + static_assert_uimm_bits!(IU4, 4); + hexagon_S5_vasrhrnd_goodsyntax(rss, IU4 as i32) +} + +/// `dccleana(Rs32)` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(test, assert_instr(dccleana))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_dccleana_A(rs: i32) { + hexagon_Y2_dccleana(rs) +} + +/// `dccleaninva(Rs32)` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(test, assert_instr(dccleaninva))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_dccleaninva_A(rs: i32) { + hexagon_Y2_dccleaninva(rs) +} + +/// `dcfetch(Rs32)` +/// +/// Instruction Type: MAPPING +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(test, assert_instr(dcfetch))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_dcfetch_A(rs: i32) { + hexagon_Y2_dcfetch(rs) +} + +/// `dcinva(Rs32)` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(test, assert_instr(dcinva))] +#[unstable(feature 
= "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_dcinva_A(rs: i32) { + hexagon_Y2_dcinva(rs) +} + +/// `dczeroa(Rs32)` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(test, assert_instr(dczeroa))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_dczeroa_A(rs: i32) { + hexagon_Y2_dczeroa(rs) +} + +/// `l2fetch(Rs32,Rt32)` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(test, assert_instr(l2fetch))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_l2fetch_AR(rs: i32, rt: i32) { + hexagon_Y4_l2fetch(rs, rt) +} + +/// `l2fetch(Rs32,Rtt32)` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(test, assert_instr(l2fetch))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_l2fetch_AP(rs: i32, rtt: i64) { + hexagon_Y5_l2fetch(rs, rtt) +} + +/// `Rdd32=rol(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(rol, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_rol_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S6_rol_i_p(rss, IU6 as i32) +} + +/// `Rxx32+=rol(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_rolacc_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S6_rol_i_p_acc(rxx, rss, IU6 as i32) +} + +/// `Rxx32&=rol(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: 
SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_roland_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S6_rol_i_p_and(rxx, rss, IU6 as i32) +} + +/// `Rxx32-=rol(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_rolnac_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S6_rol_i_p_nac(rxx, rss, IU6 as i32) +} + +/// `Rxx32|=rol(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_rolor_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S6_rol_i_p_or(rxx, rss, IU6 as i32) +} + +/// `Rxx32^=rol(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_rolxacc_PI(rxx: i64, rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_S6_rol_i_p_xacc(rxx, rss, IU6 as i32) +} + +/// `Rd32=rol(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(rol, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_rol_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S6_rol_i_r(rs, IU5 as i32) +} + +/// `Rx32+=rol(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_rolacc_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S6_rol_i_r_acc(rx, rs, IU5 as i32) +} + +/// `Rx32&=rol(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_roland_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S6_rol_i_r_and(rx, rs, IU5 as i32) +} + +/// `Rx32-=rol(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_rolnac_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S6_rol_i_r_nac(rx, rs, IU5 as i32) +} + +/// `Rx32|=rol(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] 
+#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_rolor_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S6_rol_i_r_or(rx, rs, IU5 as i32) +} + +/// `Rx32^=rol(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V60 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v60"))] +#[rustc_legacy_const_generics(2)] +#[cfg_attr(test, assert_instr(rol, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_rolxacc_RI(rx: i32, rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_S6_rol_i_r_xacc(rx, rs, IU5 as i32) +} + +/// `Rdd32=vabsdiffb(Rtt32,Rss32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V62 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v62"))] +#[cfg_attr(test, assert_instr(vabsdiffb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vabsdiffb_PP(rtt: i64, rss: i64) -> i64 { + hexagon_M6_vabsdiffb(rtt, rss) +} + +/// `Rdd32=vabsdiffub(Rtt32,Rss32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V62 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v62"))] +#[cfg_attr(test, assert_instr(vabsdiffub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vabsdiffub_PP(rtt: i64, rss: i64) -> i64 { + hexagon_M6_vabsdiffub(rtt, rss) +} + +/// `Rdd32=vsplatb(Rs32)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V62 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v62"))] +#[cfg_attr(test, assert_instr(vsplatb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vsplatb_R(rs: i32) -> i64 { + hexagon_S6_vsplatrbp(rs) +} + +/// 
`Rdd32=vtrunehb(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +/// Requires: V62 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v62"))] +#[cfg_attr(test, assert_instr(vtrunehb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vtrunehb_PP(rss: i64, rtt: i64) -> i64 { + hexagon_S6_vtrunehb_ppp(rss, rtt) +} + +/// `Rdd32=vtrunohb(Rss32,Rtt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +/// Requires: V62 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v62"))] +#[cfg_attr(test, assert_instr(vtrunohb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vtrunohb_PP(rss: i64, rtt: i64) -> i64 { + hexagon_S6_vtrunohb_ppp(rss, rtt) +} + +/// `Pd4=!any8(vcmpb.eq(Rss32,Rtt32))` +/// +/// Instruction Type: ALU64 +/// Execution Slots: SLOT23 +/// Requires: V65 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_p_not_any8_vcmpb_eq_PP(rss: i64, rtt: i64) -> i32 { + hexagon_A6_vcmpbeq_notany(rss, rtt) +} + +/// `Rdd32=dfadd(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V66 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v66"))] +#[cfg_attr(test, assert_instr(dfadd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfadd_PP(rss: f64, rtt: f64) -> f64 { + hexagon_F2_dfadd(rss, rtt) +} + +/// `Rdd32=dfsub(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V66 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v66"))] +#[cfg_attr(test, assert_instr(dfsub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfsub_PP(rss: f64, rtt: f64) -> f64 { + hexagon_F2_dfsub(rss, rtt) +} + +/// 
`Rx32-=mpyi(Rs32,Rt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V66 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v66"))] +#[cfg_attr(test, assert_instr(mpyi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mpyinac_RR(rx: i32, rs: i32, rt: i32) -> i32 { + hexagon_M2_mnaci(rx, rs, rt) +} + +/// `Rd32=mask(#u5,#U5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V66 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v66"))] +#[rustc_legacy_const_generics(0, 1)] +#[cfg_attr(test, assert_instr(mask, IU5 = 0, IU5_2 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_mask_II() -> i32 { + static_assert_uimm_bits!(IU5, 5); + static_assert_uimm_bits!(IU5_2, 5); + hexagon_S2_mask(IU5 as i32, IU5_2 as i32) +} + +/// `Rd32=clip(Rs32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(clip, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_clip_RI(rs: i32) -> i32 { + static_assert_uimm_bits!(IU5, 5); + hexagon_A7_clip(rs, IU5 as i32) +} + +/// `Rdd32=cround(Rss32,#u6)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(cround, IU6 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cround_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU6, 6); + hexagon_A7_croundd_ri(rss, IU6 as i32) +} + +/// `Rdd32=cround(Rss32,Rt32)` +/// +/// Instruction Type: S_3op +/// Execution Slots: SLOT23 +/// Requires: V67, Audio 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cround))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cround_PR(rss: i64, rt: i32) -> i64 { + hexagon_A7_croundd_rr(rss, rt) +} + +/// `Rdd32=vclip(Rss32,#u5)` +/// +/// Instruction Type: S_2op +/// Execution Slots: SLOT23 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[rustc_legacy_const_generics(1)] +#[cfg_attr(test, assert_instr(vclip, IU5 = 0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vclip_PI(rss: i64) -> i64 { + static_assert_uimm_bits!(IU5, 5); + hexagon_A7_vclip(rss, IU5 as i32) +} + +/// `Rdd32=dfmax(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V67 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67"))] +#[cfg_attr(test, assert_instr(dfmax))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfmax_PP(rss: f64, rtt: f64) -> f64 { + hexagon_F2_dfmax(rss, rtt) +} + +/// `Rdd32=dfmin(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V67 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67"))] +#[cfg_attr(test, assert_instr(dfmin))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfmin_PP(rss: f64, rtt: f64) -> f64 { + hexagon_F2_dfmin(rss, rtt) +} + +/// `Rdd32=dfmpyfix(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V67 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67"))] +#[cfg_attr(test, assert_instr(dfmpyfix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfmpyfix_PP(rss: f64, rtt: f64) -> f64 { + hexagon_F2_dfmpyfix(rss, rtt) +} + +/// `Rxx32+=dfmpyhh(Rss32,Rtt32)` +/// 
+/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V67 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67"))] +#[cfg_attr(test, assert_instr(dfmpyhh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfmpyhhacc_PP(rxx: f64, rss: f64, rtt: f64) -> f64 { + hexagon_F2_dfmpyhh(rxx, rss, rtt) +} + +/// `Rxx32+=dfmpylh(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V67 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67"))] +#[cfg_attr(test, assert_instr(dfmpylh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfmpylhacc_PP(rxx: f64, rss: f64, rtt: f64) -> f64 { + hexagon_F2_dfmpylh(rxx, rss, rtt) +} + +/// `Rdd32=dfmpyll(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT23 +/// Requires: V67 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67"))] +#[cfg_attr(test, assert_instr(dfmpyll))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_dfmpyll_PP(rss: f64, rtt: f64) -> f64 { + hexagon_F2_dfmpyll(rss, rtt) +} + +/// `Rdd32=cmpyiw(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyiw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyiw_PP(rss: i64, rtt: i64) -> i64 { + hexagon_M7_dcmpyiw(rss, rtt) +} + +/// `Rxx32+=cmpyiw(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyiw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyiwacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + 
hexagon_M7_dcmpyiw_acc(rxx, rss, rtt) +} + +/// `Rdd32=cmpyiw(Rss32,Rtt32*)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyiw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyiw_PP_conj(rss: i64, rtt: i64) -> i64 { + hexagon_M7_dcmpyiwc(rss, rtt) +} + +/// `Rxx32+=cmpyiw(Rss32,Rtt32*)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyiw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyiwacc_PP_conj(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M7_dcmpyiwc_acc(rxx, rss, rtt) +} + +/// `Rdd32=cmpyrw(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyrw_PP(rss: i64, rtt: i64) -> i64 { + hexagon_M7_dcmpyrw(rss, rtt) +} + +/// `Rxx32+=cmpyrw(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyrwacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M7_dcmpyrw_acc(rxx, rss, rtt) +} + +/// `Rdd32=cmpyrw(Rss32,Rtt32*)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyrw))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyrw_PP_conj(rss: i64, rtt: i64) -> i64 { + hexagon_M7_dcmpyrwc(rss, rtt) +} + +/// `Rxx32+=cmpyrw(Rss32,Rtt32*)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_cmpyrwacc_PP_conj(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M7_dcmpyrwc_acc(rxx, rss, rtt) +} + +/// `Rdd32=vdmpyw(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(vdmpyw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vdmpyw_PP(rss: i64, rtt: i64) -> i64 { + hexagon_M7_vdmpy(rss, rtt) +} + +/// `Rxx32+=vdmpyw(Rss32,Rtt32)` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(vdmpyw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_P_vdmpywacc_PP(rxx: i64, rss: i64, rtt: i64) -> i64 { + hexagon_M7_vdmpy_acc(rxx, rss, rtt) +} + +/// `Rd32=cmpyiw(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyiw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyiw_PP_s1_sat(rss: i64, rtt: i64) -> i32 { + hexagon_M7_wcmpyiw(rss, rtt) +} + +/// `Rd32=cmpyiw(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyiw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyiw_PP_s1_rnd_sat(rss: i64, rtt: i64) -> i32 { + hexagon_M7_wcmpyiw_rnd(rss, rtt) +} + +/// `Rd32=cmpyiw(Rss32,Rtt32*):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyiw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyiw_PP_conj_s1_sat(rss: i64, rtt: i64) -> i32 { + hexagon_M7_wcmpyiwc(rss, rtt) +} + +/// `Rd32=cmpyiw(Rss32,Rtt32*):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyiw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyiw_PP_conj_s1_rnd_sat(rss: i64, rtt: i64) -> i32 { + hexagon_M7_wcmpyiwc_rnd(rss, rtt) +} + +/// `Rd32=cmpyrw(Rss32,Rtt32):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyrw_PP_s1_sat(rss: i64, rtt: i64) -> i32 { + hexagon_M7_wcmpyrw(rss, rtt) +} + +/// `Rd32=cmpyrw(Rss32,Rtt32):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyrw_PP_s1_rnd_sat(rss: i64, rtt: i64) -> i32 { + 
hexagon_M7_wcmpyrw_rnd(rss, rtt) +} + +/// `Rd32=cmpyrw(Rss32,Rtt32*):<<1:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyrw_PP_conj_s1_sat(rss: i64, rtt: i64) -> i32 { + hexagon_M7_wcmpyrwc(rss, rtt) +} + +/// `Rd32=cmpyrw(Rss32,Rtt32*):<<1:rnd:sat` +/// +/// Instruction Type: M +/// Execution Slots: SLOT3 +/// Requires: V67, Audio +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v67,audio"))] +#[cfg_attr(test, assert_instr(cmpyrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_cmpyrw_PP_conj_s1_rnd_sat(rss: i64, rtt: i64) -> i32 { + hexagon_M7_wcmpyrwc_rnd(rss, rtt) +} + +/// `dmlink(Rs32,Rt32)` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +/// Requires: V68 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v68"))] +#[cfg_attr(test, assert_instr(dmlink))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_dmlink_AA(rs: i32, rt: i32) { + hexagon_Y6_dmlink(rs, rt) +} + +/// `Rd32=dmpause` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +/// Requires: V68 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v68"))] +#[cfg_attr(test, assert_instr(dmpause))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_dmpause() -> i32 { + hexagon_Y6_dmpause() +} + +/// `Rd32=dmpoll` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +/// Requires: V68 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v68"))] +#[cfg_attr(test, assert_instr(dmpoll))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_dmpoll() -> i32 { + hexagon_Y6_dmpoll() +} + +/// 
`dmresume(Rs32)` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +/// Requires: V68 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v68"))] +#[cfg_attr(test, assert_instr(dmresume))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_dmresume_A(rs: i32) { + hexagon_Y6_dmresume(rs) +} + +/// `dmstart(Rs32)` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +/// Requires: V68 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v68"))] +#[cfg_attr(test, assert_instr(dmstart))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_dmstart_A(rs: i32) { + hexagon_Y6_dmstart(rs) +} + +/// `Rd32=dmwait` +/// +/// Instruction Type: ST +/// Execution Slots: SLOT0 +/// Requires: V68 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "v68"))] +#[cfg_attr(test, assert_instr(dmwait))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn Q6_R_dmwait() -> i32 { + hexagon_Y6_dmwait() +} diff --git a/library/stdarch/crates/stdarch-gen-hexagon-scalar/Cargo.toml b/library/stdarch/crates/stdarch-gen-hexagon-scalar/Cargo.toml new file mode 100644 index 0000000000000..04bee944f4a91 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-hexagon-scalar/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "stdarch-gen-hexagon-scalar" +version = "0.1.0" +authors = ["The Rust Project Developers"] +license = "MIT OR Apache-2.0" +edition = "2021" + +[dependencies] +regex = "1.10" diff --git a/library/stdarch/crates/stdarch-gen-hexagon-scalar/hexagon_protos.h b/library/stdarch/crates/stdarch-gen-hexagon-scalar/hexagon_protos.h new file mode 100644 index 0000000000000..2642f3c8428d8 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-hexagon-scalar/hexagon_protos.h @@ -0,0 +1,8439 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache 
License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// Automatically generated file, do not edit! +//===----------------------------------------------------------------------===// + + + +#ifndef __HEXAGON_PROTOS_H_ +#define __HEXAGON_PROTOS_H_ 1 + +/* ========================================================================== + Assembly Syntax: Rd32=abs(Rs32) + C Intrinsic Prototype: Word32 Q6_R_abs_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_abs_R __builtin_HEXAGON_A2_abs + +/* ========================================================================== + Assembly Syntax: Rdd32=abs(Rss32) + C Intrinsic Prototype: Word64 Q6_P_abs_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_abs_P __builtin_HEXAGON_A2_absp + +/* ========================================================================== + Assembly Syntax: Rd32=abs(Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_abs_R_sat(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_abs_R_sat __builtin_HEXAGON_A2_abssat + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_add_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_add_RR __builtin_HEXAGON_A2_add + +/* ========================================================================== + Assembly Syntax: 
Rd32=add(Rt32.h,Rs32.h):<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RhRh_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RhRh_s16 __builtin_HEXAGON_A2_addh_h16_hh + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.h,Rs32.l):<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RhRl_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RhRl_s16 __builtin_HEXAGON_A2_addh_h16_hl + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RlRh_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRh_s16 __builtin_HEXAGON_A2_addh_h16_lh + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RlRl_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRl_s16 __builtin_HEXAGON_A2_addh_h16_ll + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.h,Rs32.h):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RhRh_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RhRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hh + +/* ========================================================================== + Assembly Syntax: 
Rd32=add(Rt32.h,Rs32.l):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RhRl_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RhRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_hl + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.h):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRh_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_lh + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRl_sat_s16 __builtin_HEXAGON_A2_addh_h16_sat_ll + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.h) + C Intrinsic Prototype: Word32 Q6_R_add_RlRh(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRh __builtin_HEXAGON_A2_addh_l16_hl + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.l) + C Intrinsic Prototype: Word32 Q6_R_add_RlRl(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRl __builtin_HEXAGON_A2_addh_l16_ll + +/* ========================================================================== + Assembly Syntax: 
Rd32=add(Rt32.l,Rs32.h):sat + C Intrinsic Prototype: Word32 Q6_R_add_RlRh_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRh_sat __builtin_HEXAGON_A2_addh_l16_sat_hl + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rt32.l,Rs32.l):sat + C Intrinsic Prototype: Word32 Q6_R_add_RlRl_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_RlRl_sat __builtin_HEXAGON_A2_addh_l16_sat_ll + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,#s16) + C Intrinsic Prototype: Word32 Q6_R_add_RI(Word32 Rs, Word32 Is16) + Instruction Type: ALU32_ADDI + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_add_RI __builtin_HEXAGON_A2_addi + +/* ========================================================================== + Assembly Syntax: Rdd32=add(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_add_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_add_PP __builtin_HEXAGON_A2_addp + +/* ========================================================================== + Assembly Syntax: Rdd32=add(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_add_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_add_PP_sat __builtin_HEXAGON_A2_addpsat + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 
Q6_R_add_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_add_RR_sat __builtin_HEXAGON_A2_addsat + +/* ========================================================================== + Assembly Syntax: Rdd32=add(Rs32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_add_RP(Word32 Rs, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_add_RP __builtin_HEXAGON_A2_addsp + +/* ========================================================================== + Assembly Syntax: Rd32=and(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_and_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_and_RR __builtin_HEXAGON_A2_and + +/* ========================================================================== + Assembly Syntax: Rd32=and(Rs32,#s10) + C Intrinsic Prototype: Word32 Q6_R_and_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_and_RI __builtin_HEXAGON_A2_andir + +/* ========================================================================== + Assembly Syntax: Rdd32=and(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_and_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_and_PP __builtin_HEXAGON_A2_andp + +/* ========================================================================== + Assembly Syntax: Rd32=aslh(Rs32) + C Intrinsic Prototype: Word32 Q6_R_aslh_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_R_aslh_R __builtin_HEXAGON_A2_aslh + +/* ========================================================================== + Assembly Syntax: Rd32=asrh(Rs32) + C Intrinsic Prototype: Word32 Q6_R_asrh_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_asrh_R __builtin_HEXAGON_A2_asrh + +/* ========================================================================== + Assembly Syntax: Rd32=combine(Rt32.h,Rs32.h) + C Intrinsic Prototype: Word32 Q6_R_combine_RhRh(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_combine_RhRh __builtin_HEXAGON_A2_combine_hh + +/* ========================================================================== + Assembly Syntax: Rd32=combine(Rt32.h,Rs32.l) + C Intrinsic Prototype: Word32 Q6_R_combine_RhRl(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_combine_RhRl __builtin_HEXAGON_A2_combine_hl + +/* ========================================================================== + Assembly Syntax: Rd32=combine(Rt32.l,Rs32.h) + C Intrinsic Prototype: Word32 Q6_R_combine_RlRh(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_combine_RlRh __builtin_HEXAGON_A2_combine_lh + +/* ========================================================================== + Assembly Syntax: Rd32=combine(Rt32.l,Rs32.l) + C Intrinsic Prototype: Word32 Q6_R_combine_RlRl(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_R_combine_RlRl __builtin_HEXAGON_A2_combine_ll + +/* ========================================================================== + Assembly Syntax: Rdd32=combine(#s8,#S8) + C Intrinsic Prototype: Word64 Q6_P_combine_II(Word32 Is8, Word32 IS8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_combine_II __builtin_HEXAGON_A2_combineii + +/* ========================================================================== + Assembly Syntax: Rdd32=combine(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_combine_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_combine_RR __builtin_HEXAGON_A2_combinew + +/* ========================================================================== + Assembly Syntax: Rd32=max(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_max_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_max_RR __builtin_HEXAGON_A2_max + +/* ========================================================================== + Assembly Syntax: Rdd32=max(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_max_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_max_PP __builtin_HEXAGON_A2_maxp + +/* ========================================================================== + Assembly Syntax: Rd32=maxu(Rs32,Rt32) + C Intrinsic Prototype: UWord32 Q6_R_maxu_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_R_maxu_RR __builtin_HEXAGON_A2_maxu + +/* ========================================================================== + Assembly Syntax: Rdd32=maxu(Rss32,Rtt32) + C Intrinsic Prototype: UWord64 Q6_P_maxu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_maxu_PP __builtin_HEXAGON_A2_maxup + +/* ========================================================================== + Assembly Syntax: Rd32=min(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_min_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_min_RR __builtin_HEXAGON_A2_min + +/* ========================================================================== + Assembly Syntax: Rdd32=min(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_min_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_min_PP __builtin_HEXAGON_A2_minp + +/* ========================================================================== + Assembly Syntax: Rd32=minu(Rt32,Rs32) + C Intrinsic Prototype: UWord32 Q6_R_minu_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_minu_RR __builtin_HEXAGON_A2_minu + +/* ========================================================================== + Assembly Syntax: Rdd32=minu(Rtt32,Rss32) + C Intrinsic Prototype: UWord64 Q6_P_minu_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_minu_PP __builtin_HEXAGON_A2_minup + +/* ========================================================================== + Assembly 
Syntax: Rd32=neg(Rs32) + C Intrinsic Prototype: Word32 Q6_R_neg_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_neg_R __builtin_HEXAGON_A2_neg + +/* ========================================================================== + Assembly Syntax: Rdd32=neg(Rss32) + C Intrinsic Prototype: Word64 Q6_P_neg_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_neg_P __builtin_HEXAGON_A2_negp + +/* ========================================================================== + Assembly Syntax: Rd32=neg(Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_neg_R_sat(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_neg_R_sat __builtin_HEXAGON_A2_negsat + +/* ========================================================================== + Assembly Syntax: Rd32=not(Rs32) + C Intrinsic Prototype: Word32 Q6_R_not_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_not_R __builtin_HEXAGON_A2_not + +/* ========================================================================== + Assembly Syntax: Rdd32=not(Rss32) + C Intrinsic Prototype: Word64 Q6_P_not_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_not_P __builtin_HEXAGON_A2_notp + +/* ========================================================================== + Assembly Syntax: Rd32=or(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_or_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_R_or_RR __builtin_HEXAGON_A2_or + +/* ========================================================================== + Assembly Syntax: Rd32=or(Rs32,#s10) + C Intrinsic Prototype: Word32 Q6_R_or_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_or_RI __builtin_HEXAGON_A2_orir + +/* ========================================================================== + Assembly Syntax: Rdd32=or(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_or_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_or_PP __builtin_HEXAGON_A2_orp + +/* ========================================================================== + Assembly Syntax: Rd32=round(Rss32):sat + C Intrinsic Prototype: Word32 Q6_R_round_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_P_sat __builtin_HEXAGON_A2_roundsat + +/* ========================================================================== + Assembly Syntax: Rd32=sat(Rss32) + C Intrinsic Prototype: Word32 Q6_R_sat_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sat_P __builtin_HEXAGON_A2_sat + +/* ========================================================================== + Assembly Syntax: Rd32=satb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_satb_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_satb_R __builtin_HEXAGON_A2_satb + +/* 
========================================================================== + Assembly Syntax: Rd32=sath(Rs32) + C Intrinsic Prototype: Word32 Q6_R_sath_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sath_R __builtin_HEXAGON_A2_sath + +/* ========================================================================== + Assembly Syntax: Rd32=satub(Rs32) + C Intrinsic Prototype: Word32 Q6_R_satub_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_satub_R __builtin_HEXAGON_A2_satub + +/* ========================================================================== + Assembly Syntax: Rd32=satuh(Rs32) + C Intrinsic Prototype: Word32 Q6_R_satuh_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_satuh_R __builtin_HEXAGON_A2_satuh + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_sub_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sub_RR __builtin_HEXAGON_A2_sub + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.h,Rs32.h):<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RhRh_s16 __builtin_HEXAGON_A2_subh_h16_hh + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.h,Rs32.l):<<16 + C Intrinsic Prototype: Word32 
Q6_R_sub_RhRl_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RhRl_s16 __builtin_HEXAGON_A2_subh_h16_hl + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRh_s16 __builtin_HEXAGON_A2_subh_h16_lh + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRl_s16 __builtin_HEXAGON_A2_subh_h16_ll + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.h,Rs32.h):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RhRh_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RhRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hh + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.h,Rs32.l):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RhRl_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RhRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_hl + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):sat:<<16 + C Intrinsic 
Prototype: Word32 Q6_R_sub_RlRh_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRh_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_lh + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):sat:<<16 + C Intrinsic Prototype: Word32 Q6_R_sub_RlRl_sat_s16(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRl_sat_s16 __builtin_HEXAGON_A2_subh_h16_sat_ll + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h) + C Intrinsic Prototype: Word32 Q6_R_sub_RlRh(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRh __builtin_HEXAGON_A2_subh_l16_hl + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l) + C Intrinsic Prototype: Word32 Q6_R_sub_RlRl(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRl __builtin_HEXAGON_A2_subh_l16_ll + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.h):sat + C Intrinsic Prototype: Word32 Q6_R_sub_RlRh_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRh_sat __builtin_HEXAGON_A2_subh_l16_sat_hl + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32.l,Rs32.l):sat + C Intrinsic Prototype: Word32 
Q6_R_sub_RlRl_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_RlRl_sat __builtin_HEXAGON_A2_subh_l16_sat_ll + +/* ========================================================================== + Assembly Syntax: Rdd32=sub(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_sub_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_sub_PP __builtin_HEXAGON_A2_subp + +/* ========================================================================== + Assembly Syntax: Rd32=sub(#s10,Rs32) + C Intrinsic Prototype: Word32 Q6_R_sub_IR(Word32 Is10, Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sub_IR __builtin_HEXAGON_A2_subri + +/* ========================================================================== + Assembly Syntax: Rd32=sub(Rt32,Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_sub_RR_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sub_RR_sat __builtin_HEXAGON_A2_subsat + +/* ========================================================================== + Assembly Syntax: Rd32=vaddh(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_vaddh_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vaddh_RR __builtin_HEXAGON_A2_svaddh + +/* ========================================================================== + Assembly Syntax: Rd32=vaddh(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_vaddh_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution 
Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vaddh_RR_sat __builtin_HEXAGON_A2_svaddhs + +/* ========================================================================== + Assembly Syntax: Rd32=vadduh(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_vadduh_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vadduh_RR_sat __builtin_HEXAGON_A2_svadduhs + +/* ========================================================================== + Assembly Syntax: Rd32=vavgh(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_vavgh_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vavgh_RR __builtin_HEXAGON_A2_svavgh + +/* ========================================================================== + Assembly Syntax: Rd32=vavgh(Rs32,Rt32):rnd + C Intrinsic Prototype: Word32 Q6_R_vavgh_RR_rnd(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vavgh_RR_rnd __builtin_HEXAGON_A2_svavghs + +/* ========================================================================== + Assembly Syntax: Rd32=vnavgh(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_vnavgh_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vnavgh_RR __builtin_HEXAGON_A2_svnavgh + +/* ========================================================================== + Assembly Syntax: Rd32=vsubh(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_vsubh_RR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_R_vsubh_RR __builtin_HEXAGON_A2_svsubh + +/* ========================================================================== + Assembly Syntax: Rd32=vsubh(Rt32,Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_vsubh_RR_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vsubh_RR_sat __builtin_HEXAGON_A2_svsubhs + +/* ========================================================================== + Assembly Syntax: Rd32=vsubuh(Rt32,Rs32):sat + C Intrinsic Prototype: Word32 Q6_R_vsubuh_RR_sat(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vsubuh_RR_sat __builtin_HEXAGON_A2_svsubuhs + +/* ========================================================================== + Assembly Syntax: Rd32=swiz(Rs32) + C Intrinsic Prototype: Word32 Q6_R_swiz_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_swiz_R __builtin_HEXAGON_A2_swiz + +/* ========================================================================== + Assembly Syntax: Rd32=sxtb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_sxtb_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sxtb_R __builtin_HEXAGON_A2_sxtb + +/* ========================================================================== + Assembly Syntax: Rd32=sxth(Rs32) + C Intrinsic Prototype: Word32 Q6_R_sxth_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_sxth_R __builtin_HEXAGON_A2_sxth + +/* 
========================================================================== + Assembly Syntax: Rdd32=sxtw(Rs32) + C Intrinsic Prototype: Word64 Q6_P_sxtw_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_sxtw_R __builtin_HEXAGON_A2_sxtw + +/* ========================================================================== + Assembly Syntax: Rd32=Rs32 + C Intrinsic Prototype: Word32 Q6_R_equals_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_equals_R __builtin_HEXAGON_A2_tfr + +/* ========================================================================== + Assembly Syntax: Rx32.h=#u16 + C Intrinsic Prototype: Word32 Q6_Rh_equals_I(Word32 Rx, Word32 Iu16) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Rh_equals_I __builtin_HEXAGON_A2_tfrih + +/* ========================================================================== + Assembly Syntax: Rx32.l=#u16 + C Intrinsic Prototype: Word32 Q6_Rl_equals_I(Word32 Rx, Word32 Iu16) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Rl_equals_I __builtin_HEXAGON_A2_tfril + +/* ========================================================================== + Assembly Syntax: Rdd32=Rss32 + C Intrinsic Prototype: Word64 Q6_P_equals_P(Word64 Rss) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_equals_P __builtin_HEXAGON_A2_tfrp + +/* ========================================================================== + Assembly Syntax: Rdd32=#s8 + C Intrinsic Prototype: Word64 Q6_P_equals_I(Word32 Is8) + Instruction Type: 
ALU64 + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_equals_I __builtin_HEXAGON_A2_tfrpi + +/* ========================================================================== + Assembly Syntax: Rd32=#s16 + C Intrinsic Prototype: Word32 Q6_R_equals_I(Word32 Is16) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_equals_I __builtin_HEXAGON_A2_tfrsi + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsh(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsh_P __builtin_HEXAGON_A2_vabsh + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsh(Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vabsh_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsh_P_sat __builtin_HEXAGON_A2_vabshsat + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsw(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsw_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsw_P __builtin_HEXAGON_A2_vabsw + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsw(Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vabsw_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsw_P_sat __builtin_HEXAGON_A2_vabswsat + +/* 
========================================================================== + Assembly Syntax: Rdd32=vaddb(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vaddb_PP(Word64 Rss, Word64 Rtt) + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vaddb_PP __builtin_HEXAGON_A2_vaddb_map + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vaddh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddh_PP __builtin_HEXAGON_A2_vaddh + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vaddh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddh_PP_sat __builtin_HEXAGON_A2_vaddhs + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vaddub_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddub_PP __builtin_HEXAGON_A2_vaddub + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddub(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vaddub_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddub_PP_sat __builtin_HEXAGON_A2_vaddubs + +/* 
========================================================================== + Assembly Syntax: Rdd32=vadduh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vadduh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vadduh_PP_sat __builtin_HEXAGON_A2_vadduhs + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vaddw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddw_PP __builtin_HEXAGON_A2_vaddw + +/* ========================================================================== + Assembly Syntax: Rdd32=vaddw(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vaddw_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaddw_PP_sat __builtin_HEXAGON_A2_vaddws + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavgh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgh_PP __builtin_HEXAGON_A2_vavgh + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32):crnd + C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_crnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgh_PP_crnd __builtin_HEXAGON_A2_vavghcr + +/* 
========================================================================== + Assembly Syntax: Rdd32=vavgh(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavgh_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgh_PP_rnd __builtin_HEXAGON_A2_vavghr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavgub_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgub_PP __builtin_HEXAGON_A2_vavgub + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgub(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavgub_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgub_PP_rnd __builtin_HEXAGON_A2_vavgubr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavguh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavguh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavguh_PP __builtin_HEXAGON_A2_vavguh + +/* ========================================================================== + Assembly Syntax: Rdd32=vavguh(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavguh_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavguh_PP_rnd __builtin_HEXAGON_A2_vavguhr + +/* 
========================================================================== + Assembly Syntax: Rdd32=vavguw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavguw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavguw_PP __builtin_HEXAGON_A2_vavguw + +/* ========================================================================== + Assembly Syntax: Rdd32=vavguw(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavguw_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavguw_PP_rnd __builtin_HEXAGON_A2_vavguwr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vavgw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgw_PP __builtin_HEXAGON_A2_vavgw + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32):crnd + C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_crnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgw_PP_crnd __builtin_HEXAGON_A2_vavgwcr + +/* ========================================================================== + Assembly Syntax: Rdd32=vavgw(Rss32,Rtt32):rnd + C Intrinsic Prototype: Word64 Q6_P_vavgw_PP_rnd(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vavgw_PP_rnd __builtin_HEXAGON_A2_vavgwr + +/* 
========================================================================== + Assembly Syntax: Pd4=vcmpb.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_eq_PP __builtin_HEXAGON_A2_vcmpbeq + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.gtu(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_gtu_PP __builtin_HEXAGON_A2_vcmpbgtu + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_eq_PP __builtin_HEXAGON_A2_vcmpheq + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_gt_PP __builtin_HEXAGON_A2_vcmphgt + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.gtu(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_gtu_PP __builtin_HEXAGON_A2_vcmphgtu + +/* 
========================================================================== + Assembly Syntax: Pd4=vcmpw.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_eq_PP __builtin_HEXAGON_A2_vcmpweq + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_gt_PP __builtin_HEXAGON_A2_vcmpwgt + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.gtu(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_gtu_PP __builtin_HEXAGON_A2_vcmpwgtu + +/* ========================================================================== + Assembly Syntax: Rdd32=vconj(Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vconj_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vconj_P_sat __builtin_HEXAGON_A2_vconj + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxb(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxb_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxb_PP __builtin_HEXAGON_A2_vmaxb + +/* ========================================================================== + Assembly 
Syntax: Rdd32=vmaxh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxh_PP __builtin_HEXAGON_A2_vmaxh + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxub(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxub_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxub_PP __builtin_HEXAGON_A2_vmaxub + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxuh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxuh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxuh_PP __builtin_HEXAGON_A2_vmaxuh + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxuw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxuw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxuw_PP __builtin_HEXAGON_A2_vmaxuw + +/* ========================================================================== + Assembly Syntax: Rdd32=vmaxw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vmaxw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmaxw_PP __builtin_HEXAGON_A2_vmaxw + +/* ========================================================================== + Assembly Syntax: Rdd32=vminb(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminb_PP(Word64 Rtt, 
Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminb_PP __builtin_HEXAGON_A2_vminb + +/* ========================================================================== + Assembly Syntax: Rdd32=vminh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminh_PP __builtin_HEXAGON_A2_vminh + +/* ========================================================================== + Assembly Syntax: Rdd32=vminub(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminub_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminub_PP __builtin_HEXAGON_A2_vminub + +/* ========================================================================== + Assembly Syntax: Rdd32=vminuh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminuh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminuh_PP __builtin_HEXAGON_A2_vminuh + +/* ========================================================================== + Assembly Syntax: Rdd32=vminuw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminuw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vminuw_PP __builtin_HEXAGON_A2_vminuw + +/* ========================================================================== + Assembly Syntax: Rdd32=vminw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vminw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vminw_PP __builtin_HEXAGON_A2_vminw + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgh_PP __builtin_HEXAGON_A2_vnavgh + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32):crnd:sat + C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_crnd_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgh_PP_crnd_sat __builtin_HEXAGON_A2_vnavghcr + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgh(Rtt32,Rss32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vnavgh_PP_rnd_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgh_PP_rnd_sat __builtin_HEXAGON_A2_vnavghr + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgw_PP __builtin_HEXAGON_A2_vnavgw + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32):crnd:sat + C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_crnd_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vnavgw_PP_crnd_sat __builtin_HEXAGON_A2_vnavgwcr + +/* ========================================================================== + Assembly Syntax: Rdd32=vnavgw(Rtt32,Rss32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vnavgw_PP_rnd_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vnavgw_PP_rnd_sat __builtin_HEXAGON_A2_vnavgwr + +/* ========================================================================== + Assembly Syntax: Rdd32=vraddub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vraddub_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vraddub_PP __builtin_HEXAGON_A2_vraddub + +/* ========================================================================== + Assembly Syntax: Rxx32+=vraddub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vraddubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vraddubacc_PP __builtin_HEXAGON_A2_vraddub_acc + +/* ========================================================================== + Assembly Syntax: Rdd32=vrsadub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrsadub_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrsadub_PP __builtin_HEXAGON_A2_vrsadub + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrsadub(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrsadubacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vrsadubacc_PP __builtin_HEXAGON_A2_vrsadub_acc + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubb(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vsubb_PP(Word64 Rss, Word64 Rtt) + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vsubb_PP __builtin_HEXAGON_A2_vsubb_map + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsubh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubh_PP __builtin_HEXAGON_A2_vsubh + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubh(Rtt32,Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vsubh_PP_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubh_PP_sat __builtin_HEXAGON_A2_vsubhs + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubub(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsubub_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubub_PP __builtin_HEXAGON_A2_vsubub + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubub(Rtt32,Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vsubub_PP_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vsubub_PP_sat __builtin_HEXAGON_A2_vsububs + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubuh(Rtt32,Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vsubuh_PP_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubuh_PP_sat __builtin_HEXAGON_A2_vsubuhs + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsubw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubw_PP __builtin_HEXAGON_A2_vsubw + +/* ========================================================================== + Assembly Syntax: Rdd32=vsubw(Rtt32,Rss32):sat + C Intrinsic Prototype: Word64 Q6_P_vsubw_PP_sat(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsubw_PP_sat __builtin_HEXAGON_A2_vsubws + +/* ========================================================================== + Assembly Syntax: Rd32=xor(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_xor_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_xor_RR __builtin_HEXAGON_A2_xor + +/* ========================================================================== + Assembly Syntax: Rdd32=xor(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_xor_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ 
+ +#define Q6_P_xor_PP __builtin_HEXAGON_A2_xorp + +/* ========================================================================== + Assembly Syntax: Rd32=zxtb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_zxtb_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_zxtb_R __builtin_HEXAGON_A2_zxtb + +/* ========================================================================== + Assembly Syntax: Rd32=zxth(Rs32) + C Intrinsic Prototype: Word32 Q6_R_zxth_R(Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_zxth_R __builtin_HEXAGON_A2_zxth + +/* ========================================================================== + Assembly Syntax: Rd32=and(Rt32,~Rs32) + C Intrinsic Prototype: Word32 Q6_R_and_RnR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_and_RnR __builtin_HEXAGON_A4_andn + +/* ========================================================================== + Assembly Syntax: Rdd32=and(Rtt32,~Rss32) + C Intrinsic Prototype: Word64 Q6_P_and_PnP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_and_PnP __builtin_HEXAGON_A4_andnp + +/* ========================================================================== + Assembly Syntax: Rdd32=bitsplit(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_bitsplit_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_bitsplit_RR __builtin_HEXAGON_A4_bitsplit + +/* ========================================================================== + Assembly 
Syntax: Rdd32=bitsplit(Rs32,#u5) + C Intrinsic Prototype: Word64 Q6_P_bitsplit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_bitsplit_RI __builtin_HEXAGON_A4_bitspliti + +/* ========================================================================== + Assembly Syntax: Pd4=boundscheck(Rs32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_boundscheck_RP(Word32 Rs, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_boundscheck_RP __builtin_HEXAGON_A4_boundscheck + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_eq_RR __builtin_HEXAGON_A4_cmpbeq + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.eq(Rs32,#u8) + C Intrinsic Prototype: Byte Q6_p_cmpb_eq_RI(Word32 Rs, Word32 Iu8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_eq_RI __builtin_HEXAGON_A4_cmpbeqi + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmpb_gt_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_gt_RR __builtin_HEXAGON_A4_cmpbgt + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.gt(Rs32,#s8) + C Intrinsic Prototype: Byte 
Q6_p_cmpb_gt_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_gt_RI __builtin_HEXAGON_A4_cmpbgti + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.gtu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_gtu_RR __builtin_HEXAGON_A4_cmpbgtu + +/* ========================================================================== + Assembly Syntax: Pd4=cmpb.gtu(Rs32,#u7) + C Intrinsic Prototype: Byte Q6_p_cmpb_gtu_RI(Word32 Rs, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmpb_gtu_RI __builtin_HEXAGON_A4_cmpbgtui + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmph_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_eq_RR __builtin_HEXAGON_A4_cmpheq + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.eq(Rs32,#s8) + C Intrinsic Prototype: Byte Q6_p_cmph_eq_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_eq_RI __builtin_HEXAGON_A4_cmpheqi + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmph_gt_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_p_cmph_gt_RR __builtin_HEXAGON_A4_cmphgt + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.gt(Rs32,#s8) + C Intrinsic Prototype: Byte Q6_p_cmph_gt_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_gt_RI __builtin_HEXAGON_A4_cmphgti + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.gtu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_gtu_RR __builtin_HEXAGON_A4_cmphgtu + +/* ========================================================================== + Assembly Syntax: Pd4=cmph.gtu(Rs32,#u7) + C Intrinsic Prototype: Byte Q6_p_cmph_gtu_RI(Word32 Rs, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmph_gtu_RI __builtin_HEXAGON_A4_cmphgtui + +/* ========================================================================== + Assembly Syntax: Rdd32=combine(#s8,Rs32) + C Intrinsic Prototype: Word64 Q6_P_combine_IR(Word32 Is8, Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_combine_IR __builtin_HEXAGON_A4_combineir + +/* ========================================================================== + Assembly Syntax: Rdd32=combine(Rs32,#s8) + C Intrinsic Prototype: Word64 Q6_P_combine_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== 
*/ + +#define Q6_P_combine_RI __builtin_HEXAGON_A4_combineri + +/* ========================================================================== + Assembly Syntax: Rd32=cround(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_cround_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cround_RI __builtin_HEXAGON_A4_cround_ri + +/* ========================================================================== + Assembly Syntax: Rd32=cround(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_cround_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cround_RR __builtin_HEXAGON_A4_cround_rr + +/* ========================================================================== + Assembly Syntax: Rd32=modwrap(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_modwrap_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_modwrap_RR __builtin_HEXAGON_A4_modwrapu + +/* ========================================================================== + Assembly Syntax: Rd32=or(Rt32,~Rs32) + C Intrinsic Prototype: Word32 Q6_R_or_RnR(Word32 Rt, Word32 Rs) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_or_RnR __builtin_HEXAGON_A4_orn + +/* ========================================================================== + Assembly Syntax: Rdd32=or(Rtt32,~Rss32) + C Intrinsic Prototype: Word64 Q6_P_or_PnP(Word64 Rtt, Word64 Rss) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_or_PnP __builtin_HEXAGON_A4_ornp + +/* 
========================================================================== + Assembly Syntax: Rd32=cmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_cmp_eq_RR __builtin_HEXAGON_A4_rcmpeq + +/* ========================================================================== + Assembly Syntax: Rd32=cmp.eq(Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_cmp_eq_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_cmp_eq_RI __builtin_HEXAGON_A4_rcmpeqi + +/* ========================================================================== + Assembly Syntax: Rd32=!cmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_not_cmp_eq_RR __builtin_HEXAGON_A4_rcmpneq + +/* ========================================================================== + Assembly Syntax: Rd32=!cmp.eq(Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_not_cmp_eq_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_not_cmp_eq_RI __builtin_HEXAGON_A4_rcmpneqi + +/* ========================================================================== + Assembly Syntax: Rd32=round(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_round_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_RI __builtin_HEXAGON_A4_round_ri + +/* 
========================================================================== + Assembly Syntax: Rd32=round(Rs32,#u5):sat + C Intrinsic Prototype: Word32 Q6_R_round_RI_sat(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_RI_sat __builtin_HEXAGON_A4_round_ri_sat + +/* ========================================================================== + Assembly Syntax: Rd32=round(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_round_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_RR __builtin_HEXAGON_A4_round_rr + +/* ========================================================================== + Assembly Syntax: Rd32=round(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_round_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_round_RR_sat __builtin_HEXAGON_A4_round_rr_sat + +/* ========================================================================== + Assembly Syntax: Pd4=tlbmatch(Rss32,Rt32) + C Intrinsic Prototype: Byte Q6_p_tlbmatch_PR(Word64 Rss, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_tlbmatch_PR __builtin_HEXAGON_A4_tlbmatch + +/* ========================================================================== + Assembly Syntax: Pd4=any8(vcmpb.eq(Rss32,Rtt32)) + C Intrinsic Prototype: Byte Q6_p_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_any8_vcmpb_eq_PP __builtin_HEXAGON_A4_vcmpbeq_any + +/* 
========================================================================== + Assembly Syntax: Pd4=vcmpb.eq(Rss32,#u8) + C Intrinsic Prototype: Byte Q6_p_vcmpb_eq_PI(Word64 Rss, Word32 Iu8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_eq_PI __builtin_HEXAGON_A4_vcmpbeqi + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_gt_PP __builtin_HEXAGON_A4_vcmpbgt + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.gt(Rss32,#s8) + C Intrinsic Prototype: Byte Q6_p_vcmpb_gt_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_gt_PI __builtin_HEXAGON_A4_vcmpbgti + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpb.gtu(Rss32,#u7) + C Intrinsic Prototype: Byte Q6_p_vcmpb_gtu_PI(Word64 Rss, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpb_gtu_PI __builtin_HEXAGON_A4_vcmpbgtui + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.eq(Rss32,#s8) + C Intrinsic Prototype: Byte Q6_p_vcmph_eq_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_eq_PI __builtin_HEXAGON_A4_vcmpheqi + +/* 
========================================================================== + Assembly Syntax: Pd4=vcmph.gt(Rss32,#s8) + C Intrinsic Prototype: Byte Q6_p_vcmph_gt_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_gt_PI __builtin_HEXAGON_A4_vcmphgti + +/* ========================================================================== + Assembly Syntax: Pd4=vcmph.gtu(Rss32,#u7) + C Intrinsic Prototype: Byte Q6_p_vcmph_gtu_PI(Word64 Rss, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmph_gtu_PI __builtin_HEXAGON_A4_vcmphgtui + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.eq(Rss32,#s8) + C Intrinsic Prototype: Byte Q6_p_vcmpw_eq_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_eq_PI __builtin_HEXAGON_A4_vcmpweqi + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.gt(Rss32,#s8) + C Intrinsic Prototype: Byte Q6_p_vcmpw_gt_PI(Word64 Rss, Word32 Is8) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_gt_PI __builtin_HEXAGON_A4_vcmpwgti + +/* ========================================================================== + Assembly Syntax: Pd4=vcmpw.gtu(Rss32,#u7) + C Intrinsic Prototype: Byte Q6_p_vcmpw_gtu_PI(Word64 Rss, Word32 Iu7) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_vcmpw_gtu_PI __builtin_HEXAGON_A4_vcmpwgtui + +/* 
========================================================================== + Assembly Syntax: Rxx32=vrmaxh(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrmaxh_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmaxh_PR __builtin_HEXAGON_A4_vrmaxh + +/* ========================================================================== + Assembly Syntax: Rxx32=vrmaxuh(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrmaxuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmaxuh_PR __builtin_HEXAGON_A4_vrmaxuh + +/* ========================================================================== + Assembly Syntax: Rxx32=vrmaxuw(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrmaxuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmaxuw_PR __builtin_HEXAGON_A4_vrmaxuw + +/* ========================================================================== + Assembly Syntax: Rxx32=vrmaxw(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrmaxw_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmaxw_PR __builtin_HEXAGON_A4_vrmaxw + +/* ========================================================================== + Assembly Syntax: Rxx32=vrminh(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrminh_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrminh_PR __builtin_HEXAGON_A4_vrminh + +/* 
========================================================================== + Assembly Syntax: Rxx32=vrminuh(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrminuh_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrminuh_PR __builtin_HEXAGON_A4_vrminuh + +/* ========================================================================== + Assembly Syntax: Rxx32=vrminuw(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrminuw_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrminuw_PR __builtin_HEXAGON_A4_vrminuw + +/* ========================================================================== + Assembly Syntax: Rxx32=vrminw(Rss32,Ru32) + C Intrinsic Prototype: Word64 Q6_P_vrminw_PR(Word64 Rxx, Word64 Rss, Word32 Ru) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrminw_PR __builtin_HEXAGON_A4_vrminw + +/* ========================================================================== + Assembly Syntax: Rd32=vaddhub(Rss32,Rtt32):sat + C Intrinsic Prototype: Word32 Q6_R_vaddhub_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vaddhub_PP_sat __builtin_HEXAGON_A5_vaddhubs + +/* ========================================================================== + Assembly Syntax: Pd4=all8(Ps4) + C Intrinsic Prototype: Byte Q6_p_all8_p(Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_all8_p __builtin_HEXAGON_C2_all8 + +/* 
========================================================================== + Assembly Syntax: Pd4=and(Pt4,Ps4) + C Intrinsic Prototype: Byte Q6_p_and_pp(Byte Pt, Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_pp __builtin_HEXAGON_C2_and + +/* ========================================================================== + Assembly Syntax: Pd4=and(Pt4,!Ps4) + C Intrinsic Prototype: Byte Q6_p_and_pnp(Byte Pt, Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_pnp __builtin_HEXAGON_C2_andn + +/* ========================================================================== + Assembly Syntax: Pd4=any8(Ps4) + C Intrinsic Prototype: Byte Q6_p_any8_p(Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_any8_p __builtin_HEXAGON_C2_any8 + +/* ========================================================================== + Assembly Syntax: Pd4=bitsclr(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_bitsclr_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_bitsclr_RR __builtin_HEXAGON_C2_bitsclr + +/* ========================================================================== + Assembly Syntax: Pd4=bitsclr(Rs32,#u6) + C Intrinsic Prototype: Byte Q6_p_bitsclr_RI(Word32 Rs, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_bitsclr_RI __builtin_HEXAGON_C2_bitsclri + +/* ========================================================================== + Assembly Syntax: Pd4=bitsset(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_bitsset_RR(Word32 Rs, Word32 Rt) 
+ Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_bitsset_RR __builtin_HEXAGON_C2_bitsset + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_eq_RR __builtin_HEXAGON_C2_cmpeq + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.eq(Rs32,#s10) + C Intrinsic Prototype: Byte Q6_p_cmp_eq_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_eq_RI __builtin_HEXAGON_C2_cmpeqi + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_cmp_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmp_eq_PP __builtin_HEXAGON_C2_cmpeqp + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.ge(Rs32,#s8) + C Intrinsic Prototype: Byte Q6_p_cmp_ge_RI(Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_ge_RI __builtin_HEXAGON_C2_cmpgei + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.geu(Rs32,#u8) + C Intrinsic Prototype: Byte Q6_p_cmp_geu_RI(Word32 Rs, Word32 Iu8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_p_cmp_geu_RI __builtin_HEXAGON_C2_cmpgeui + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_gt_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_gt_RR __builtin_HEXAGON_C2_cmpgt + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gt(Rs32,#s10) + C Intrinsic Prototype: Byte Q6_p_cmp_gt_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_gt_RI __builtin_HEXAGON_C2_cmpgti + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_cmp_gt_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmp_gt_PP __builtin_HEXAGON_C2_cmpgtp + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gtu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_gtu_RR __builtin_HEXAGON_C2_cmpgtu + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gtu(Rs32,#u9) + C Intrinsic Prototype: Byte Q6_p_cmp_gtu_RI(Word32 Rs, Word32 Iu9) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define 
Q6_p_cmp_gtu_RI __builtin_HEXAGON_C2_cmpgtui + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.gtu(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_cmp_gtu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_cmp_gtu_PP __builtin_HEXAGON_C2_cmpgtup + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.lt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_lt_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_lt_RR __builtin_HEXAGON_C2_cmplt + +/* ========================================================================== + Assembly Syntax: Pd4=cmp.ltu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_cmp_ltu_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_cmp_ltu_RR __builtin_HEXAGON_C2_cmpltu + +/* ========================================================================== + Assembly Syntax: Rdd32=mask(Pt4) + C Intrinsic Prototype: Word64 Q6_P_mask_p(Byte Pt) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mask_p __builtin_HEXAGON_C2_mask + +/* ========================================================================== + Assembly Syntax: Rd32=mux(Pu4,Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mux_pRR(Byte Pu, Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mux_pRR __builtin_HEXAGON_C2_mux + +/* 
========================================================================== + Assembly Syntax: Rd32=mux(Pu4,#s8,#S8) + C Intrinsic Prototype: Word32 Q6_R_mux_pII(Byte Pu, Word32 Is8, Word32 IS8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mux_pII __builtin_HEXAGON_C2_muxii + +/* ========================================================================== + Assembly Syntax: Rd32=mux(Pu4,Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_mux_pRI(Byte Pu, Word32 Rs, Word32 Is8) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mux_pRI __builtin_HEXAGON_C2_muxir + +/* ========================================================================== + Assembly Syntax: Rd32=mux(Pu4,#s8,Rs32) + C Intrinsic Prototype: Word32 Q6_R_mux_pIR(Byte Pu, Word32 Is8, Word32 Rs) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mux_pIR __builtin_HEXAGON_C2_muxri + +/* ========================================================================== + Assembly Syntax: Pd4=not(Ps4) + C Intrinsic Prototype: Byte Q6_p_not_p(Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_p __builtin_HEXAGON_C2_not + +/* ========================================================================== + Assembly Syntax: Pd4=or(Pt4,Ps4) + C Intrinsic Prototype: Byte Q6_p_or_pp(Byte Pt, Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_pp __builtin_HEXAGON_C2_or + +/* ========================================================================== + Assembly Syntax: Pd4=or(Pt4,!Ps4) + C Intrinsic Prototype: Byte 
Q6_p_or_pnp(Byte Pt, Byte Ps) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_pnp __builtin_HEXAGON_C2_orn + +/* ========================================================================== + Assembly Syntax: Pd4=Ps4 + C Intrinsic Prototype: Byte Q6_p_equals_p(Byte Ps) + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_equals_p __builtin_HEXAGON_C2_pxfer_map + +/* ========================================================================== + Assembly Syntax: Rd32=Ps4 + C Intrinsic Prototype: Word32 Q6_R_equals_p(Byte Ps) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_equals_p __builtin_HEXAGON_C2_tfrpr + +/* ========================================================================== + Assembly Syntax: Pd4=Rs32 + C Intrinsic Prototype: Byte Q6_p_equals_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_equals_R __builtin_HEXAGON_C2_tfrrp + +/* ========================================================================== + Assembly Syntax: Rd32=vitpack(Ps4,Pt4) + C Intrinsic Prototype: Word32 Q6_R_vitpack_pp(Byte Ps, Byte Pt) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vitpack_pp __builtin_HEXAGON_C2_vitpack + +/* ========================================================================== + Assembly Syntax: Rdd32=vmux(Pu4,Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vmux_pPP(Byte Pu, Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmux_pPP 
__builtin_HEXAGON_C2_vmux + +/* ========================================================================== + Assembly Syntax: Pd4=xor(Ps4,Pt4) + C Intrinsic Prototype: Byte Q6_p_xor_pp(Byte Ps, Byte Pt) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_xor_pp __builtin_HEXAGON_C2_xor + +/* ========================================================================== + Assembly Syntax: Pd4=and(Ps4,and(Pt4,Pu4)) + C Intrinsic Prototype: Byte Q6_p_and_and_ppp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_and_ppp __builtin_HEXAGON_C4_and_and + +/* ========================================================================== + Assembly Syntax: Pd4=and(Ps4,and(Pt4,!Pu4)) + C Intrinsic Prototype: Byte Q6_p_and_and_ppnp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_and_ppnp __builtin_HEXAGON_C4_and_andn + +/* ========================================================================== + Assembly Syntax: Pd4=and(Ps4,or(Pt4,Pu4)) + C Intrinsic Prototype: Byte Q6_p_and_or_ppp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_or_ppp __builtin_HEXAGON_C4_and_or + +/* ========================================================================== + Assembly Syntax: Pd4=and(Ps4,or(Pt4,!Pu4)) + C Intrinsic Prototype: Byte Q6_p_and_or_ppnp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_and_or_ppnp __builtin_HEXAGON_C4_and_orn + +/* 
========================================================================== + Assembly Syntax: Pd4=!cmp.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_gt_RR __builtin_HEXAGON_C4_cmplte + +/* ========================================================================== + Assembly Syntax: Pd4=!cmp.gt(Rs32,#s10) + C Intrinsic Prototype: Byte Q6_p_not_cmp_gt_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_gt_RI __builtin_HEXAGON_C4_cmpltei + +/* ========================================================================== + Assembly Syntax: Pd4=!cmp.gtu(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_gtu_RR __builtin_HEXAGON_C4_cmplteu + +/* ========================================================================== + Assembly Syntax: Pd4=!cmp.gtu(Rs32,#u9) + C Intrinsic Prototype: Byte Q6_p_not_cmp_gtu_RI(Word32 Rs, Word32 Iu9) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_gtu_RI __builtin_HEXAGON_C4_cmplteui + +/* ========================================================================== + Assembly Syntax: Pd4=!cmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_eq_RR __builtin_HEXAGON_C4_cmpneq + +/* 
========================================================================== + Assembly Syntax: Pd4=!cmp.eq(Rs32,#s10) + C Intrinsic Prototype: Byte Q6_p_not_cmp_eq_RI(Word32 Rs, Word32 Is10) + Instruction Type: ALU32_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_p_not_cmp_eq_RI __builtin_HEXAGON_C4_cmpneqi + +/* ========================================================================== + Assembly Syntax: Pd4=fastcorner9(Ps4,Pt4) + C Intrinsic Prototype: Byte Q6_p_fastcorner9_pp(Byte Ps, Byte Pt) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9 + +/* ========================================================================== + Assembly Syntax: Pd4=!fastcorner9(Ps4,Pt4) + C Intrinsic Prototype: Byte Q6_p_not_fastcorner9_pp(Byte Ps, Byte Pt) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_fastcorner9_pp __builtin_HEXAGON_C4_fastcorner9_not + +/* ========================================================================== + Assembly Syntax: Pd4=!bitsclr(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_bitsclr_RR __builtin_HEXAGON_C4_nbitsclr + +/* ========================================================================== + Assembly Syntax: Pd4=!bitsclr(Rs32,#u6) + C Intrinsic Prototype: Byte Q6_p_not_bitsclr_RI(Word32 Rs, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_bitsclr_RI __builtin_HEXAGON_C4_nbitsclri + +/* 
========================================================================== + Assembly Syntax: Pd4=!bitsset(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_bitsset_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_bitsset_RR __builtin_HEXAGON_C4_nbitsset + +/* ========================================================================== + Assembly Syntax: Pd4=or(Ps4,and(Pt4,Pu4)) + C Intrinsic Prototype: Byte Q6_p_or_and_ppp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_and_ppp __builtin_HEXAGON_C4_or_and + +/* ========================================================================== + Assembly Syntax: Pd4=or(Ps4,and(Pt4,!Pu4)) + C Intrinsic Prototype: Byte Q6_p_or_and_ppnp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_and_ppnp __builtin_HEXAGON_C4_or_andn + +/* ========================================================================== + Assembly Syntax: Pd4=or(Ps4,or(Pt4,Pu4)) + C Intrinsic Prototype: Byte Q6_p_or_or_ppp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_or_ppp __builtin_HEXAGON_C4_or_or + +/* ========================================================================== + Assembly Syntax: Pd4=or(Ps4,or(Pt4,!Pu4)) + C Intrinsic Prototype: Byte Q6_p_or_or_ppnp(Byte Ps, Byte Pt, Byte Pu) + Instruction Type: CR + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_or_or_ppnp __builtin_HEXAGON_C4_or_orn + +/* ========================================================================== + 
Assembly Syntax: Rdd32=convert_d2df(Rss32) + C Intrinsic Prototype: Float64 Q6_P_convert_d2df_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_d2df_P __builtin_HEXAGON_F2_conv_d2df + +/* ========================================================================== + Assembly Syntax: Rd32=convert_d2sf(Rss32) + C Intrinsic Prototype: Float32 Q6_R_convert_d2sf_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_d2sf_P __builtin_HEXAGON_F2_conv_d2sf + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_df2d(Rss32) + C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_df2d_P __builtin_HEXAGON_F2_conv_df2d + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_df2d(Rss32):chop + C Intrinsic Prototype: Word64 Q6_P_convert_df2d_P_chop(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_df2d_P_chop __builtin_HEXAGON_F2_conv_df2d_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2sf(Rss32) + C Intrinsic Prototype: Float32 Q6_R_convert_df2sf_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2sf_P __builtin_HEXAGON_F2_conv_df2sf + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_df2ud(Rss32) + C 
Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_df2ud_P __builtin_HEXAGON_F2_conv_df2ud + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_df2ud(Rss32):chop + C Intrinsic Prototype: Word64 Q6_P_convert_df2ud_P_chop(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_df2ud_P_chop __builtin_HEXAGON_F2_conv_df2ud_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2uw(Rss32) + C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2uw_P __builtin_HEXAGON_F2_conv_df2uw + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2uw(Rss32):chop + C Intrinsic Prototype: Word32 Q6_R_convert_df2uw_P_chop(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2uw_P_chop __builtin_HEXAGON_F2_conv_df2uw_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2w(Rss32) + C Intrinsic Prototype: Word32 Q6_R_convert_df2w_P(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2w_P __builtin_HEXAGON_F2_conv_df2w + +/* ========================================================================== + Assembly Syntax: Rd32=convert_df2w(Rss32):chop + C Intrinsic 
Prototype: Word32 Q6_R_convert_df2w_P_chop(Float64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_df2w_P_chop __builtin_HEXAGON_F2_conv_df2w_chop + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2d(Rs32) + C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2d_R __builtin_HEXAGON_F2_conv_sf2d + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2d(Rs32):chop + C Intrinsic Prototype: Word64 Q6_P_convert_sf2d_R_chop(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2d_R_chop __builtin_HEXAGON_F2_conv_sf2d_chop + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2df(Rs32) + C Intrinsic Prototype: Float64 Q6_P_convert_sf2df_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2df_R __builtin_HEXAGON_F2_conv_sf2df + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2ud(Rs32) + C Intrinsic Prototype: Word64 Q6_P_convert_sf2ud_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2ud_R __builtin_HEXAGON_F2_conv_sf2ud + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_sf2ud(Rs32):chop + C Intrinsic Prototype: Word64 
Q6_P_convert_sf2ud_R_chop(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_sf2ud_R_chop __builtin_HEXAGON_F2_conv_sf2ud_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_sf2uw(Rs32) + C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_sf2uw_R __builtin_HEXAGON_F2_conv_sf2uw + +/* ========================================================================== + Assembly Syntax: Rd32=convert_sf2uw(Rs32):chop + C Intrinsic Prototype: Word32 Q6_R_convert_sf2uw_R_chop(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_sf2uw_R_chop __builtin_HEXAGON_F2_conv_sf2uw_chop + +/* ========================================================================== + Assembly Syntax: Rd32=convert_sf2w(Rs32) + C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_sf2w_R __builtin_HEXAGON_F2_conv_sf2w + +/* ========================================================================== + Assembly Syntax: Rd32=convert_sf2w(Rs32):chop + C Intrinsic Prototype: Word32 Q6_R_convert_sf2w_R_chop(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_sf2w_R_chop __builtin_HEXAGON_F2_conv_sf2w_chop + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_ud2df(Rss32) + C Intrinsic Prototype: Float64 
Q6_P_convert_ud2df_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_ud2df_P __builtin_HEXAGON_F2_conv_ud2df + +/* ========================================================================== + Assembly Syntax: Rd32=convert_ud2sf(Rss32) + C Intrinsic Prototype: Float32 Q6_R_convert_ud2sf_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_ud2sf_P __builtin_HEXAGON_F2_conv_ud2sf + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_uw2df(Rs32) + C Intrinsic Prototype: Float64 Q6_P_convert_uw2df_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_uw2df_R __builtin_HEXAGON_F2_conv_uw2df + +/* ========================================================================== + Assembly Syntax: Rd32=convert_uw2sf(Rs32) + C Intrinsic Prototype: Float32 Q6_R_convert_uw2sf_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_uw2sf_R __builtin_HEXAGON_F2_conv_uw2sf + +/* ========================================================================== + Assembly Syntax: Rdd32=convert_w2df(Rs32) + C Intrinsic Prototype: Float64 Q6_P_convert_w2df_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_convert_w2df_R __builtin_HEXAGON_F2_conv_w2df + +/* ========================================================================== + Assembly Syntax: Rd32=convert_w2sf(Rs32) + C Intrinsic Prototype: Float32 Q6_R_convert_w2sf_R(Word32 Rs) + Instruction Type: S_2op + Execution 
Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_convert_w2sf_R __builtin_HEXAGON_F2_conv_w2sf + +/* ========================================================================== + Assembly Syntax: Pd4=dfclass(Rss32,#u5) + C Intrinsic Prototype: Byte Q6_p_dfclass_PI(Float64 Rss, Word32 Iu5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_dfclass_PI __builtin_HEXAGON_F2_dfclass + +/* ========================================================================== + Assembly Syntax: Pd4=dfcmp.eq(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_dfcmp_eq_PP(Float64 Rss, Float64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_dfcmp_eq_PP __builtin_HEXAGON_F2_dfcmpeq + +/* ========================================================================== + Assembly Syntax: Pd4=dfcmp.ge(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_dfcmp_ge_PP(Float64 Rss, Float64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_dfcmp_ge_PP __builtin_HEXAGON_F2_dfcmpge + +/* ========================================================================== + Assembly Syntax: Pd4=dfcmp.gt(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_dfcmp_gt_PP(Float64 Rss, Float64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_dfcmp_gt_PP __builtin_HEXAGON_F2_dfcmpgt + +/* ========================================================================== + Assembly Syntax: Pd4=dfcmp.uo(Rss32,Rtt32) + C Intrinsic Prototype: Byte Q6_p_dfcmp_uo_PP(Float64 Rss, Float64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_p_dfcmp_uo_PP __builtin_HEXAGON_F2_dfcmpuo + +/* ========================================================================== + Assembly Syntax: Rdd32=dfmake(#u10):neg + C Intrinsic Prototype: Float64 Q6_P_dfmake_I_neg(Word32 Iu10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmake_I_neg __builtin_HEXAGON_F2_dfimm_n + +/* ========================================================================== + Assembly Syntax: Rdd32=dfmake(#u10):pos + C Intrinsic Prototype: Float64 Q6_P_dfmake_I_pos(Word32 Iu10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmake_I_pos __builtin_HEXAGON_F2_dfimm_p + +/* ========================================================================== + Assembly Syntax: Rd32=sfadd(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfadd_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfadd_RR __builtin_HEXAGON_F2_sfadd + +/* ========================================================================== + Assembly Syntax: Pd4=sfclass(Rs32,#u5) + C Intrinsic Prototype: Byte Q6_p_sfclass_RI(Float32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfclass_RI __builtin_HEXAGON_F2_sfclass + +/* ========================================================================== + Assembly Syntax: Pd4=sfcmp.eq(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_sfcmp_eq_RR(Float32 Rs, Float32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfcmp_eq_RR 
__builtin_HEXAGON_F2_sfcmpeq + +/* ========================================================================== + Assembly Syntax: Pd4=sfcmp.ge(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_sfcmp_ge_RR(Float32 Rs, Float32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfcmp_ge_RR __builtin_HEXAGON_F2_sfcmpge + +/* ========================================================================== + Assembly Syntax: Pd4=sfcmp.gt(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_sfcmp_gt_RR(Float32 Rs, Float32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfcmp_gt_RR __builtin_HEXAGON_F2_sfcmpgt + +/* ========================================================================== + Assembly Syntax: Pd4=sfcmp.uo(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_sfcmp_uo_RR(Float32 Rs, Float32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_sfcmp_uo_RR __builtin_HEXAGON_F2_sfcmpuo + +/* ========================================================================== + Assembly Syntax: Rd32=sffixupd(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sffixupd_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sffixupd_RR __builtin_HEXAGON_F2_sffixupd + +/* ========================================================================== + Assembly Syntax: Rd32=sffixupn(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sffixupn_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sffixupn_RR __builtin_HEXAGON_F2_sffixupn + +/* 
========================================================================== + Assembly Syntax: Rd32=sffixupr(Rs32) + C Intrinsic Prototype: Float32 Q6_R_sffixupr_R(Float32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sffixupr_R __builtin_HEXAGON_F2_sffixupr + +/* ========================================================================== + Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR(Float32 Rx, Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpyacc_RR __builtin_HEXAGON_F2_sffma + +/* ========================================================================== + Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32):lib + C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpyacc_RR_lib __builtin_HEXAGON_F2_sffma_lib + +/* ========================================================================== + Assembly Syntax: Rx32+=sfmpy(Rs32,Rt32,Pu4):scale + C Intrinsic Prototype: Float32 Q6_R_sfmpyacc_RRp_scale(Float32 Rx, Float32 Rs, Float32 Rt, Byte Pu) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpyacc_RRp_scale __builtin_HEXAGON_F2_sffma_sc + +/* ========================================================================== + Assembly Syntax: Rx32-=sfmpy(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR(Float32 Rx, Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpynac_RR __builtin_HEXAGON_F2_sffms + +/* 
========================================================================== + Assembly Syntax: Rx32-=sfmpy(Rs32,Rt32):lib + C Intrinsic Prototype: Float32 Q6_R_sfmpynac_RR_lib(Float32 Rx, Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpynac_RR_lib __builtin_HEXAGON_F2_sffms_lib + +/* ========================================================================== + Assembly Syntax: Rd32=sfmake(#u10):neg + C Intrinsic Prototype: Float32 Q6_R_sfmake_I_neg(Word32 Iu10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmake_I_neg __builtin_HEXAGON_F2_sfimm_n + +/* ========================================================================== + Assembly Syntax: Rd32=sfmake(#u10):pos + C Intrinsic Prototype: Float32 Q6_R_sfmake_I_pos(Word32 Iu10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmake_I_pos __builtin_HEXAGON_F2_sfimm_p + +/* ========================================================================== + Assembly Syntax: Rd32=sfmax(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmax_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmax_RR __builtin_HEXAGON_F2_sfmax + +/* ========================================================================== + Assembly Syntax: Rd32=sfmin(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmin_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmin_RR __builtin_HEXAGON_F2_sfmin + +/* ========================================================================== + Assembly Syntax: 
Rd32=sfmpy(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfmpy_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfmpy_RR __builtin_HEXAGON_F2_sfmpy + +/* ========================================================================== + Assembly Syntax: Rd32=sfsub(Rs32,Rt32) + C Intrinsic Prototype: Float32 Q6_R_sfsub_RR(Float32 Rs, Float32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sfsub_RR __builtin_HEXAGON_F2_sfsub + +/* ========================================================================== + Assembly Syntax: Rd32=memb(Rx32++#s4:0:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memb_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memb_IM_circ __builtin_HEXAGON_L2_loadrb_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memb(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memb_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memb_M_circ __builtin_HEXAGON_L2_loadrb_pcr + +/* ========================================================================== + Assembly Syntax: Rdd32=memd(Rx32++#s4:3:circ(Mu2)) + C Intrinsic Prototype: Word64 Q6_P_memd_IM_circ(void** Rx, Word32 Is4_3, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_P_memd_IM_circ __builtin_HEXAGON_L2_loadrd_pci + +/* ========================================================================== + 
Assembly Syntax: Rdd32=memd(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word64 Q6_P_memd_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_P_memd_M_circ __builtin_HEXAGON_L2_loadrd_pcr + +/* ========================================================================== + Assembly Syntax: Rd32=memh(Rx32++#s4:1:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memh_IM_circ __builtin_HEXAGON_L2_loadrh_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memh(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memh_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memh_M_circ __builtin_HEXAGON_L2_loadrh_pcr + +/* ========================================================================== + Assembly Syntax: Rd32=memw(Rx32++#s4:2:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memw_IM_circ(void** Rx, Word32 Is4_2, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memw_IM_circ __builtin_HEXAGON_L2_loadri_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memw(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memw_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memw_M_circ __builtin_HEXAGON_L2_loadri_pcr + 
+/* ========================================================================== + Assembly Syntax: Rd32=memub(Rx32++#s4:0:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memub_IM_circ(void** Rx, Word32 Is4_0, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memub_IM_circ __builtin_HEXAGON_L2_loadrub_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memub(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memub_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memub_M_circ __builtin_HEXAGON_L2_loadrub_pcr + +/* ========================================================================== + Assembly Syntax: Rd32=memuh(Rx32++#s4:1:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memuh_IM_circ(void** Rx, Word32 Is4_1, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memuh_IM_circ __builtin_HEXAGON_L2_loadruh_pci + +/* ========================================================================== + Assembly Syntax: Rd32=memuh(Rx32++I:circ(Mu2)) + C Intrinsic Prototype: Word32 Q6_R_memuh_M_circ(void** Rx, Word32 Mu, void* BaseAddress) + Instruction Type: LD + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_R_memuh_M_circ __builtin_HEXAGON_L2_loadruh_pcr + +/* ========================================================================== + Assembly Syntax: Rx32+=add(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_addacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_addacc_RR __builtin_HEXAGON_M2_acci + +/* ========================================================================== + Assembly Syntax: Rx32+=add(Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_addacc_RI(Word32 Rx, Word32 Rs, Word32 Is8) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_addacc_RI __builtin_HEXAGON_M2_accii + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyi(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyiacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyiacc_RR __builtin_HEXAGON_M2_cmaci_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyr(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyracc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyracc_RR __builtin_HEXAGON_M2_cmacr_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyacc_RR_sat __builtin_HEXAGON_M2_cmacs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_cmpyacc_RR_s1_sat __builtin_HEXAGON_M2_cmacs_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32*):sat + C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyacc_RR_conj_sat __builtin_HEXAGON_M2_cmacsc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpy(Rs32,Rt32*):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpyacc_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyacc_RR_conj_s1_sat __builtin_HEXAGON_M2_cmacsc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyi(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyi_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyi_RR __builtin_HEXAGON_M2_cmpyi_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyr(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyr_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpyr_RR __builtin_HEXAGON_M2_cmpyr_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=cmpy(Rs32,Rt32):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_cmpy_RR_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=cmpy(Rs32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpy_RR_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrs_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=cmpy(Rs32,Rt32*):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpy_RR_conj_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=cmpy(Rs32,Rt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpy_RR_conj_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpy_RR_conj_s1_rnd_sat __builtin_HEXAGON_M2_cmpyrsc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpy(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpy_RR_sat __builtin_HEXAGON_M2_cmpys_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: 
SLOT23 + ========================================================================== */ + +#define Q6_P_cmpy_RR_s1_sat __builtin_HEXAGON_M2_cmpys_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpy(Rs32,Rt32*):sat + C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpy_RR_conj_sat __builtin_HEXAGON_M2_cmpysc_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=cmpy(Rs32,Rt32*):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpy_RR_conj_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpy_RR_conj_s1_sat __builtin_HEXAGON_M2_cmpysc_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpynac_RR_sat __builtin_HEXAGON_M2_cnacs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpynac_RR_s1_sat __builtin_HEXAGON_M2_cnacs_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32*):sat + C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction 
Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpynac_RR_conj_sat __builtin_HEXAGON_M2_cnacsc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=cmpy(Rs32,Rt32*):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_cmpynac_RR_conj_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cmpynac_RR_conj_s1_sat __builtin_HEXAGON_M2_cnacsc_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RR __builtin_HEXAGON_M2_dpmpyss_acc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RR __builtin_HEXAGON_M2_dpmpyss_nac_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RR_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RR_rnd __builtin_HEXAGON_M2_dpmpyss_rnd_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpy_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution 
Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RR __builtin_HEXAGON_M2_dpmpyss_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RR __builtin_HEXAGON_M2_dpmpyuu_acc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RR __builtin_HEXAGON_M2_dpmpyuu_nac_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32,Rt32) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RR __builtin_HEXAGON_M2_dpmpyuu_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32.h):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RRh_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyh_rs1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RRh_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_mpy_RRh_s1_sat __builtin_HEXAGON_M2_hmmpyh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32.l):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RRl_s1_rnd_sat __builtin_HEXAGON_M2_hmmpyl_rs1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RRl_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RRl_s1_sat __builtin_HEXAGON_M2_hmmpyl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyi(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyiacc_RR __builtin_HEXAGON_M2_maci + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyi(Rs32,#u8) + C Intrinsic Prototype: Word32 Q6_R_mpyinac_RI(Word32 Rx, Word32 Rs, Word32 Iu8) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyinac_RI __builtin_HEXAGON_M2_macsin + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyi(Rs32,#u8) + C Intrinsic Prototype: Word32 Q6_R_mpyiacc_RI(Word32 Rx, Word32 Rs, Word32 Iu8) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_mpyiacc_RI __builtin_HEXAGON_M2_macsip + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywohacc_PP_rnd_sat __builtin_HEXAGON_M2_mmachs_rs0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywohacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmachs_rs1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywohacc_PP_sat __builtin_HEXAGON_M2_mmachs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywoh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywohacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywohacc_PP_s1_sat __builtin_HEXAGON_M2_mmachs_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):rnd:sat + C Intrinsic 
Prototype: Word64 Q6_P_vmpywehacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywehacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacls_rs0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywehacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacls_rs1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywehacc_PP_sat __builtin_HEXAGON_M2_mmacls_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywehacc_PP_s1_sat __builtin_HEXAGON_M2_mmacls_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs0 + +/* 
========================================================================== + Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmacuhs_rs1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouhacc_PP_sat __builtin_HEXAGON_M2_mmacuhs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpywouh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouhacc_PP_s1_sat __builtin_HEXAGON_M2_mmacuhs_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuhacc_PP_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_rnd_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution 
Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuhacc_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmaculs_rs1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuhacc_PP_sat __builtin_HEXAGON_M2_mmaculs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyweuh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuhacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuhacc_PP_s1_sat __builtin_HEXAGON_M2_mmaculs_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywoh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywoh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyh_rs1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):sat + C Intrinsic 
Prototype: Word64 Q6_P_vmpywoh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywoh_PP_sat __builtin_HEXAGON_M2_mmpyh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywoh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywoh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywoh_PP_s1_sat __builtin_HEXAGON_M2_mmpyh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyl_rs1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweh_PP_sat __builtin_HEXAGON_M2_mmpyl_s0 + +/* ========================================================================== + Assembly Syntax: 
Rdd32=vmpyweh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweh_PP_s1_sat __builtin_HEXAGON_M2_mmpyl_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyuh_rs1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouh_PP_sat __builtin_HEXAGON_M2_mmpyuh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpywouh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpywouh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpywouh_PP_s1_sat __builtin_HEXAGON_M2_mmpyuh_s1 + +/* 
========================================================================== + Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuh_PP_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuh_PP_s1_rnd_sat __builtin_HEXAGON_M2_mmpyul_rs1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuh_PP_sat __builtin_HEXAGON_M2_mmpyul_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyweuh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyweuh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyweuh_PP_s1_sat __builtin_HEXAGON_M2_mmpyul_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_R_mpyacc_RhRh __builtin_HEXAGON_M2_mpy_acc_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpy_acc_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRl __builtin_HEXAGON_M2_mpy_acc_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpy_acc_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRh __builtin_HEXAGON_M2_mpy_acc_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpy_acc_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRl __builtin_HEXAGON_M2_mpy_acc_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpy_acc_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_sat(Word32 Rx, Word32 Rs, Word32 
Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.h,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRh_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32.l,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRl_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s0 + +/* ========================================================================== + Assembly Syntax: 
Rx32+=mpy(Rs32.l,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_acc_sat_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh __builtin_HEXAGON_M2_mpy_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpy_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl __builtin_HEXAGON_M2_mpy_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpy_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: 
Word32 Q6_R_mpy_RlRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh __builtin_HEXAGON_M2_mpy_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpy_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl __builtin_HEXAGON_M2_mpy_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpy_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRh __builtin_HEXAGON_M2_mpy_nac_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + 
Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpy_nac_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRl __builtin_HEXAGON_M2_mpy_nac_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpy_nac_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRh __builtin_HEXAGON_M2_mpy_nac_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpy_nac_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl(Word32 Rx, Word32 Rs, 
Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRl __builtin_HEXAGON_M2_mpy_nac_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpy_nac_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.h,Rt32.l):<<1:sat + C 
Intrinsic Prototype: Word32 Q6_R_mpynac_RhRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRh_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRh_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRl_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32.l,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RlRl_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_nac_sat_ll_s1 + +/* 
========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s0 + +/* 
========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpy_rnd_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_sat __builtin_HEXAGON_M2_mpy_sat_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_hh_s1 + +/* 
========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_sat __builtin_HEXAGON_M2_mpy_sat_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_sat __builtin_HEXAGON_M2_mpy_sat_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_s1_sat __builtin_HEXAGON_M2_mpy_sat_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_sat __builtin_HEXAGON_M2_mpy_sat_ll_s0 + +/* 
========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_s1_sat __builtin_HEXAGON_M2_mpy_sat_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.h):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRh_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RhRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.h,Rt32.l):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RhRl_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_R_mpy_RhRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.h):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRh_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRh_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32.l,Rt32.l):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RlRl_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RlRl_s1_rnd_sat __builtin_HEXAGON_M2_mpy_sat_rnd_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpy_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_mpy_RR __builtin_HEXAGON_M2_mpy_up + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RR_s1 __builtin_HEXAGON_M2_mpy_up_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpy_RR_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpy_RR_s1_sat __builtin_HEXAGON_M2_mpy_up_s1_sat + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RhRh __builtin_HEXAGON_M2_mpyd_acc_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RhRh_s1 __builtin_HEXAGON_M2_mpyd_acc_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_mpyacc_RhRl __builtin_HEXAGON_M2_mpyd_acc_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RhRl_s1 __builtin_HEXAGON_M2_mpyd_acc_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RlRh __builtin_HEXAGON_M2_mpyd_acc_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RlRh_s1 __builtin_HEXAGON_M2_mpyd_acc_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RlRl __builtin_HEXAGON_M2_mpyd_acc_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction 
Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyacc_RlRl_s1 __builtin_HEXAGON_M2_mpyd_acc_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRh __builtin_HEXAGON_M2_mpyd_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRh_s1 __builtin_HEXAGON_M2_mpyd_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRl __builtin_HEXAGON_M2_mpyd_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRl_s1 __builtin_HEXAGON_M2_mpyd_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_mpy_RlRh __builtin_HEXAGON_M2_mpyd_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRh_s1 __builtin_HEXAGON_M2_mpyd_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRl __builtin_HEXAGON_M2_mpyd_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRl_s1 __builtin_HEXAGON_M2_mpyd_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RhRh __builtin_HEXAGON_M2_mpyd_nac_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_mpynac_RhRh_s1 __builtin_HEXAGON_M2_mpyd_nac_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RhRl __builtin_HEXAGON_M2_mpyd_nac_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpynac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RhRl_s1 __builtin_HEXAGON_M2_mpyd_nac_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RlRh __builtin_HEXAGON_M2_mpyd_nac_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RlRh_s1 __builtin_HEXAGON_M2_mpyd_nac_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M 
+ Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RlRl __builtin_HEXAGON_M2_mpyd_nac_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpy(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpynac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpynac_RlRl_s1 __builtin_HEXAGON_M2_mpyd_nac_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.h):<<1:rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRh_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.h,Rt32.l):<<1:rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RhRl_s1_rnd(Word32 Rs, Word32 Rt) + Instruction 
Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RhRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRh_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.h):<<1:rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRh_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRh_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRl_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpy(Rs32.l,Rt32.l):<<1:rnd + C Intrinsic Prototype: Word64 Q6_P_mpy_RlRl_s1_rnd(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpy_RlRl_s1_rnd __builtin_HEXAGON_M2_mpyd_rnd_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyi(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpyi_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution 
Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyi_RR __builtin_HEXAGON_M2_mpyi + +/* ========================================================================== + Assembly Syntax: Rd32=mpyi(Rs32,#m9) + C Intrinsic Prototype: Word32 Q6_R_mpyi_RI(Word32 Rs, Word32 Im9) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mpyi_RI __builtin_HEXAGON_M2_mpysmi + +/* ========================================================================== + Assembly Syntax: Rd32=mpysu(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpysu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpysu_RR __builtin_HEXAGON_M2_mpysu_up + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyu_acc_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyu_acc_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyu_acc_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyu_acc_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyu_acc_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyu_acc_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyu_acc_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32+=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyuacc_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + 
Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyu_acc_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RhRh __builtin_HEXAGON_M2_mpyu_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyu_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RhRl __builtin_HEXAGON_M2_mpyu_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RhRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyu_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_mpyu_RlRh __builtin_HEXAGON_M2_mpyu_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyu_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RlRl __builtin_HEXAGON_M2_mpyu_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RlRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyu_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RhRh __builtin_HEXAGON_M2_mpyu_nac_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyu_nac_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RhRl __builtin_HEXAGON_M2_mpyu_nac_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RhRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyu_nac_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RlRh __builtin_HEXAGON_M2_mpyu_nac_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRh_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyu_nac_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction 
Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RlRl __builtin_HEXAGON_M2_mpyu_nac_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rx32-=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word32 Q6_R_mpyunac_RlRl_s1(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyu_nac_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyu(Rs32,Rt32) + C Intrinsic Prototype: UWord32 Q6_R_mpyu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyu_RR __builtin_HEXAGON_M2_mpyu_up + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RhRh __builtin_HEXAGON_M2_mpyud_acc_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RhRh_s1 __builtin_HEXAGON_M2_mpyud_acc_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt) + 
Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RhRl __builtin_HEXAGON_M2_mpyud_acc_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RhRl_s1 __builtin_HEXAGON_M2_mpyud_acc_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RlRh __builtin_HEXAGON_M2_mpyud_acc_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RlRh_s1 __builtin_HEXAGON_M2_mpyud_acc_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyuacc_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RlRl __builtin_HEXAGON_M2_mpyud_acc_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 
Q6_P_mpyuacc_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyuacc_RlRl_s1 __builtin_HEXAGON_M2_mpyud_acc_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RhRh __builtin_HEXAGON_M2_mpyud_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RhRh_s1 __builtin_HEXAGON_M2_mpyud_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RhRl __builtin_HEXAGON_M2_mpyud_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RhRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RhRl_s1 __builtin_HEXAGON_M2_mpyud_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh(Word32 Rs, Word32 Rt) 
+ Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RlRh __builtin_HEXAGON_M2_mpyud_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRh_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RlRh_s1 __builtin_HEXAGON_M2_mpyud_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RlRl __builtin_HEXAGON_M2_mpyud_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: UWord64 Q6_P_mpyu_RlRl_s1(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyu_RlRl_s1 __builtin_HEXAGON_M2_mpyud_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RhRh __builtin_HEXAGON_M2_mpyud_nac_hh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: 
M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RhRh_s1 __builtin_HEXAGON_M2_mpyud_nac_hh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.l) + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RhRl __builtin_HEXAGON_M2_mpyud_nac_hl_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.h,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RhRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RhRl_s1 __builtin_HEXAGON_M2_mpyud_nac_hl_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.h) + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RlRh __builtin_HEXAGON_M2_mpyud_nac_lh_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.h):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRh_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RlRh_s1 __builtin_HEXAGON_M2_mpyud_nac_lh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.l) + C Intrinsic Prototype: Word64 
Q6_P_mpyunac_RlRl(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RlRl __builtin_HEXAGON_M2_mpyud_nac_ll_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32-=mpyu(Rs32.l,Rt32.l):<<1 + C Intrinsic Prototype: Word64 Q6_P_mpyunac_RlRl_s1(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_mpyunac_RlRl_s1 __builtin_HEXAGON_M2_mpyud_nac_ll_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=mpyui(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpyui_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_mpyui_RR __builtin_HEXAGON_M2_mpyui + +/* ========================================================================== + Assembly Syntax: Rx32-=add(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_addnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_addnac_RR __builtin_HEXAGON_M2_nacci + +/* ========================================================================== + Assembly Syntax: Rx32-=add(Rs32,#s8) + C Intrinsic Prototype: Word32 Q6_R_addnac_RI(Word32 Rx, Word32 Rs, Word32 Is8) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_addnac_RI __builtin_HEXAGON_M2_naccii + +/* ========================================================================== + Assembly Syntax: Rx32+=sub(Rt32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_subacc_RR(Word32 Rx, Word32 Rt, Word32 Rs) 
+ Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_subacc_RR __builtin_HEXAGON_M2_subacc + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsdiffh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsdiffh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsdiffh_PP __builtin_HEXAGON_M2_vabsdiffh + +/* ========================================================================== + Assembly Syntax: Rdd32=vabsdiffw(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsdiffw_PP(Word64 Rtt, Word64 Rss) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsdiffw_PP __builtin_HEXAGON_M2_vabsdiffw + +/* ========================================================================== + Assembly Syntax: Rxx32+=vcmpyi(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyiacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyiacc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_i + +/* ========================================================================== + Assembly Syntax: Rxx32+=vcmpyr(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyracc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyracc_PP_sat __builtin_HEXAGON_M2_vcmac_s0_sat_r + +/* ========================================================================== + Assembly Syntax: Rdd32=vcmpyi(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_sat(Word64 Rss, Word64 
Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyi_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_i + +/* ========================================================================== + Assembly Syntax: Rdd32=vcmpyr(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyr_PP_sat __builtin_HEXAGON_M2_vcmpy_s0_sat_r + +/* ========================================================================== + Assembly Syntax: Rdd32=vcmpyi(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyi_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyi_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_i + +/* ========================================================================== + Assembly Syntax: Rdd32=vcmpyr(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vcmpyr_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcmpyr_PP_s1_sat __builtin_HEXAGON_M2_vcmpy_s1_sat_r + +/* ========================================================================== + Assembly Syntax: Rxx32+=vdmpy(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vdmpyacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpyacc_PP_sat __builtin_HEXAGON_M2_vdmacs_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vdmpy(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 
Q6_P_vdmpyacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpyacc_PP_s1_sat __builtin_HEXAGON_M2_vdmacs_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=vdmpy(Rss32,Rtt32):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vdmpy_PP_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=vdmpy(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vdmpy_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vdmpy_PP_s1_rnd_sat __builtin_HEXAGON_M2_vdmpyrs_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vdmpy(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpy_PP_sat __builtin_HEXAGON_M2_vdmpys_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vdmpy(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vdmpy_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpy_PP_s1_sat __builtin_HEXAGON_M2_vdmpys_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32) + C Intrinsic 
Prototype: Word64 Q6_P_vmpyhacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhacc_RR __builtin_HEXAGON_M2_vmac2 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyehacc_PP __builtin_HEXAGON_M2_vmac2es + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyehacc_PP_sat __builtin_HEXAGON_M2_vmac2es_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyeh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyehacc_PP_s1_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyehacc_PP_s1_sat __builtin_HEXAGON_M2_vmac2es_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyh(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhacc_RR_sat __builtin_HEXAGON_M2_vmac2s_s0 + +/* ========================================================================== + Assembly Syntax: 
Rxx32+=vmpyh(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2s_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyhsu(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhsuacc_RR_sat __builtin_HEXAGON_M2_vmac2su_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpyhsu(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhsuacc_RR_s1_sat(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhsuacc_RR_s1_sat __builtin_HEXAGON_M2_vmac2su_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyeh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyeh_PP_sat __builtin_HEXAGON_M2_vmpy2es_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyeh(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyeh_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyeh_PP_s1_sat __builtin_HEXAGON_M2_vmpy2es_s1 + +/* 
========================================================================== + Assembly Syntax: Rdd32=vmpyh(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyh_RR_sat __builtin_HEXAGON_M2_vmpy2s_s0 + +/* ========================================================================== + Assembly Syntax: Rd32=vmpyh(Rs32,Rt32):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vmpyh_RR_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s0pack + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyh(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyh_RR_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyh_RR_s1_sat __builtin_HEXAGON_M2_vmpy2s_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=vmpyh(Rs32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vmpyh_RR_s1_rnd_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vmpyh_RR_s1_rnd_sat __builtin_HEXAGON_M2_vmpy2s_s1pack + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpyhsu(Rs32,Rt32):sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhsu_RR_sat __builtin_HEXAGON_M2_vmpy2su_s0 + +/* 
========================================================================== + Assembly Syntax: Rdd32=vmpyhsu(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vmpyhsu_RR_s1_sat(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpyhsu_RR_s1_sat __builtin_HEXAGON_M2_vmpy2su_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=vraddh(Rss32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_vraddh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vraddh_PP __builtin_HEXAGON_M2_vraddh + +/* ========================================================================== + Assembly Syntax: Rd32=vradduh(Rss32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_vradduh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vradduh_PP __builtin_HEXAGON_M2_vradduh + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcmpyi(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyiacc_PP __builtin_HEXAGON_M2_vrcmaci_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcmpyi(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyiacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyiacc_PP_conj __builtin_HEXAGON_M2_vrcmaci_s0c + +/* 
========================================================================== + Assembly Syntax: Rxx32+=vrcmpyr(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyracc_PP __builtin_HEXAGON_M2_vrcmacr_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcmpyr(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyracc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyracc_PP_conj __builtin_HEXAGON_M2_vrcmacr_s0c + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcmpyi(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyi_PP __builtin_HEXAGON_M2_vrcmpyi_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcmpyi(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyi_PP_conj(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyi_PP_conj __builtin_HEXAGON_M2_vrcmpyi_s0c + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcmpyr(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyr_PP __builtin_HEXAGON_M2_vrcmpyr_s0 + +/* 
========================================================================== + Assembly Syntax: Rdd32=vrcmpyr(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_vrcmpyr_PP_conj(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcmpyr_PP_conj __builtin_HEXAGON_M2_vrcmpyr_s0c + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcmpys(Rss32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vrcmpysacc_PR_s1_sat(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vrcmpysacc_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_acc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrcmpys(Rss32,Rt32):<<1:sat + C Intrinsic Prototype: Word64 Q6_P_vrcmpys_PR_s1_sat(Word64 Rss, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vrcmpys_PR_s1_sat __builtin_HEXAGON_M2_vrcmpys_s1 + +/* ========================================================================== + Assembly Syntax: Rd32=vrcmpys(Rss32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vrcmpys_PR_s1_rnd_sat(Word64 Rss, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vrcmpys_PR_s1_rnd_sat __builtin_HEXAGON_M2_vrcmpys_s1rp + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpyh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpyhacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + 
+#define Q6_P_vrmpyhacc_PP __builtin_HEXAGON_M2_vrmac_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpyh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpyh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpyh_PP __builtin_HEXAGON_M2_vrmpy_s0 + +/* ========================================================================== + Assembly Syntax: Rx32^=xor(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_xorxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_xorxacc_RR __builtin_HEXAGON_M2_xor_xacc + +/* ========================================================================== + Assembly Syntax: Rx32&=and(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_andand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andand_RR __builtin_HEXAGON_M4_and_and + +/* ========================================================================== + Assembly Syntax: Rx32&=and(Rs32,~Rt32) + C Intrinsic Prototype: Word32 Q6_R_andand_RnR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andand_RnR __builtin_HEXAGON_M4_and_andn + +/* ========================================================================== + Assembly Syntax: Rx32&=or(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_orand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_orand_RR __builtin_HEXAGON_M4_and_or + +/* 
========================================================================== + Assembly Syntax: Rx32&=xor(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_xorand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_xorand_RR __builtin_HEXAGON_M4_and_xor + +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiwh(Rss32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpyiwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_wh + +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiwh(Rss32,Rt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpyiwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyi_whc + +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrwh(Rss32,Rt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_s1_rnd_sat(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cmpyrwh_PR_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_wh + +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrwh(Rss32,Rt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrwh_PR_conj_s1_rnd_sat(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_cmpyrwh_PR_conj_s1_rnd_sat __builtin_HEXAGON_M4_cmpyr_whc + +/* ========================================================================== + Assembly Syntax: Rx32+=mpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpyacc_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyacc_RR_s1_sat __builtin_HEXAGON_M4_mac_up_s1_sat + +/* ========================================================================== + Assembly Syntax: Rd32=add(#u6,mpyi(Rs32,#U6)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRI(Word32 Iu6, Word32 Rs, Word32 IU6) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_IRI __builtin_HEXAGON_M4_mpyri_addi + +/* ========================================================================== + Assembly Syntax: Rd32=add(Ru32,mpyi(Rs32,#u6)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRI(Word32 Ru, Word32 Rs, Word32 Iu6) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_RRI __builtin_HEXAGON_M4_mpyri_addr + +/* ========================================================================== + Assembly Syntax: Rd32=add(Ru32,mpyi(#u6:2,Rs32)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RIR(Word32 Ru, Word32 Iu6_2, Word32 Rs) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_RIR __builtin_HEXAGON_M4_mpyri_addr_u2 + +/* ========================================================================== + Assembly Syntax: Rd32=add(#u6,mpyi(Rs32,Rt32)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_IRR(Word32 Iu6, Word32 Rs, 
Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_IRR __builtin_HEXAGON_M4_mpyrr_addi + +/* ========================================================================== + Assembly Syntax: Ry32=add(Ru32,mpyi(Ry32,Rs32)) + C Intrinsic Prototype: Word32 Q6_R_add_mpyi_RRR(Word32 Ru, Word32 Ry, Word32 Rs) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_mpyi_RRR __builtin_HEXAGON_M4_mpyrr_addr + +/* ========================================================================== + Assembly Syntax: Rx32-=mpy(Rs32,Rt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_mpynac_RR_s1_sat(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpynac_RR_s1_sat __builtin_HEXAGON_M4_nac_up_s1_sat + +/* ========================================================================== + Assembly Syntax: Rx32|=and(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_andor_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andor_RR __builtin_HEXAGON_M4_or_and + +/* ========================================================================== + Assembly Syntax: Rx32|=and(Rs32,~Rt32) + C Intrinsic Prototype: Word32 Q6_R_andor_RnR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andor_RnR __builtin_HEXAGON_M4_or_andn + +/* ========================================================================== + Assembly Syntax: Rx32|=or(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_oror_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction 
Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_oror_RR __builtin_HEXAGON_M4_or_or + +/* ========================================================================== + Assembly Syntax: Rx32|=xor(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_xoror_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_xoror_RR __builtin_HEXAGON_M4_or_xor + +/* ========================================================================== + Assembly Syntax: Rdd32=pmpyw(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_pmpyw_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_pmpyw_RR __builtin_HEXAGON_M4_pmpyw + +/* ========================================================================== + Assembly Syntax: Rxx32^=pmpyw(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_pmpywxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_pmpywxacc_RR __builtin_HEXAGON_M4_pmpyw_acc + +/* ========================================================================== + Assembly Syntax: Rdd32=vpmpyh(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vpmpyh_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vpmpyh_RR __builtin_HEXAGON_M4_vpmpyh + +/* ========================================================================== + Assembly Syntax: Rxx32^=vpmpyh(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vpmpyhxacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vpmpyhxacc_RR __builtin_HEXAGON_M4_vpmpyh_acc + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpyweh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywehacc_PP __builtin_HEXAGON_M4_vrmpyeh_acc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpyweh(Rss32,Rtt32):<<1 + C Intrinsic Prototype: Word64 Q6_P_vrmpywehacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywehacc_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_acc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpyweh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpyweh_PP __builtin_HEXAGON_M4_vrmpyeh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpyweh(Rss32,Rtt32):<<1 + C Intrinsic Prototype: Word64 Q6_P_vrmpyweh_PP_s1(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpyweh_PP_s1 __builtin_HEXAGON_M4_vrmpyeh_s1 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpywoh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: 
M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywohacc_PP __builtin_HEXAGON_M4_vrmpyoh_acc_s0 + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpywoh(Rss32,Rtt32):<<1 + C Intrinsic Prototype: Word64 Q6_P_vrmpywohacc_PP_s1(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywohacc_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_acc_s1 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpywoh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywoh_PP __builtin_HEXAGON_M4_vrmpyoh_s0 + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpywoh(Rss32,Rtt32):<<1 + C Intrinsic Prototype: Word64 Q6_P_vrmpywoh_PP_s1(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpywoh_PP_s1 __builtin_HEXAGON_M4_vrmpyoh_s1 + +/* ========================================================================== + Assembly Syntax: Rx32^=and(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_andxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andxacc_RR __builtin_HEXAGON_M4_xor_and + +/* ========================================================================== + Assembly Syntax: Rx32^=and(Rs32,~Rt32) + C Intrinsic Prototype: Word32 Q6_R_andxacc_RnR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + 
Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andxacc_RnR __builtin_HEXAGON_M4_xor_andn + +/* ========================================================================== + Assembly Syntax: Rx32^=or(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_orxacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_orxacc_RR __builtin_HEXAGON_M4_xor_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=xor(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_xorxacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_xorxacc_PP __builtin_HEXAGON_M4_xor_xacc + +/* ========================================================================== + Assembly Syntax: Rxx32+=vdmpybsu(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vdmpybsuacc_PP_sat(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpybsuacc_PP_sat __builtin_HEXAGON_M5_vdmacbsu + +/* ========================================================================== + Assembly Syntax: Rdd32=vdmpybsu(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vdmpybsu_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vdmpybsu_PP_sat __builtin_HEXAGON_M5_vdmpybsu + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpybsu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vmpybsuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + 
Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpybsuacc_RR __builtin_HEXAGON_M5_vmacbsu + +/* ========================================================================== + Assembly Syntax: Rxx32+=vmpybu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vmpybuacc_RR(Word64 Rxx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpybuacc_RR __builtin_HEXAGON_M5_vmacbuu + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpybsu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vmpybsu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpybsu_RR __builtin_HEXAGON_M5_vmpybsu + +/* ========================================================================== + Assembly Syntax: Rdd32=vmpybu(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vmpybu_RR(Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vmpybu_RR __builtin_HEXAGON_M5_vmpybuu + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpybsu(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpybsuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpybsuacc_PP __builtin_HEXAGON_M5_vrmacbsu + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrmpybu(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpybuacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vrmpybuacc_PP __builtin_HEXAGON_M5_vrmacbuu + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpybsu(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpybsu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpybsu_PP __builtin_HEXAGON_M5_vrmpybsu + +/* ========================================================================== + Assembly Syntax: Rdd32=vrmpybu(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vrmpybu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrmpybu_PP __builtin_HEXAGON_M5_vrmpybuu + +/* ========================================================================== + Assembly Syntax: Rd32=addasl(Rt32,Rs32,#u3) + C Intrinsic Prototype: Word32 Q6_R_addasl_RRI(Word32 Rt, Word32 Rs, Word32 Iu3) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_addasl_RRI __builtin_HEXAGON_S2_addasl_rrri + +/* ========================================================================== + Assembly Syntax: Rdd32=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asl_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asl_PI __builtin_HEXAGON_S2_asl_i_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_aslacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_aslacc_PI __builtin_HEXAGON_S2_asl_i_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asland_PI __builtin_HEXAGON_S2_asl_i_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_aslnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslnac_PI __builtin_HEXAGON_S2_asl_i_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_aslor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslor_PI __builtin_HEXAGON_S2_asl_i_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=asl(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_aslxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslxacc_PI __builtin_HEXAGON_S2_asl_i_p_xacc + +/* ========================================================================== + Assembly Syntax: Rd32=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asl_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_asl_RI __builtin_HEXAGON_S2_asl_i_r + +/* ========================================================================== + Assembly Syntax: Rx32+=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_aslacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslacc_RI __builtin_HEXAGON_S2_asl_i_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asland_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asland_RI __builtin_HEXAGON_S2_asl_i_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_aslnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslnac_RI __builtin_HEXAGON_S2_asl_i_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_aslor_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslor_RI __builtin_HEXAGON_S2_asl_i_r_or + +/* ========================================================================== + Assembly Syntax: Rd32=asl(Rs32,#u5):sat + C Intrinsic Prototype: Word32 Q6_R_asl_RI_sat(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_asl_RI_sat __builtin_HEXAGON_S2_asl_i_r_sat + +/* ========================================================================== + Assembly Syntax: Rx32^=asl(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_aslxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslxacc_RI __builtin_HEXAGON_S2_asl_i_r_xacc + +/* ========================================================================== + Assembly Syntax: Rdd32=vaslh(Rss32,#u4) + C Intrinsic Prototype: Word64 Q6_P_vaslh_PI(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaslh_PI __builtin_HEXAGON_S2_asl_i_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vaslw(Rss32,#u5) + C Intrinsic Prototype: Word64 Q6_P_vaslw_PI(Word64 Rss, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaslw_PI __builtin_HEXAGON_S2_asl_i_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asl_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asl_PR __builtin_HEXAGON_S2_asl_r_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_aslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== 
*/ + +#define Q6_P_aslacc_PR __builtin_HEXAGON_S2_asl_r_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asland_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asland_PR __builtin_HEXAGON_S2_asl_r_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_aslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslnac_PR __builtin_HEXAGON_S2_asl_r_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_aslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslor_PR __builtin_HEXAGON_S2_asl_r_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=asl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_aslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_aslxacc_PR __builtin_HEXAGON_S2_asl_r_p_xor + +/* ========================================================================== + Assembly Syntax: Rd32=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asl_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asl_RR 
__builtin_HEXAGON_S2_asl_r_r + +/* ========================================================================== + Assembly Syntax: Rx32+=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_aslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslacc_RR __builtin_HEXAGON_S2_asl_r_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asland_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asland_RR __builtin_HEXAGON_S2_asl_r_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_aslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslnac_RR __builtin_HEXAGON_S2_asl_r_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=asl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_aslor_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_aslor_RR __builtin_HEXAGON_S2_asl_r_r_or + +/* ========================================================================== + Assembly Syntax: Rd32=asl(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_asl_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asl_RR_sat __builtin_HEXAGON_S2_asl_r_r_sat + +/* 
========================================================================== + Assembly Syntax: Rdd32=vaslh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vaslh_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaslh_PR __builtin_HEXAGON_S2_asl_r_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vaslw(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vaslw_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vaslw_PR __builtin_HEXAGON_S2_asl_r_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asr_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asr_PI __builtin_HEXAGON_S2_asr_i_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asracc_PI __builtin_HEXAGON_S2_asr_i_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asrand_PI __builtin_HEXAGON_S2_asr_i_p_and + +/* ========================================================================== 
+ Assembly Syntax: Rxx32-=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asrnac_PI __builtin_HEXAGON_S2_asr_i_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=asr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asror_PI __builtin_HEXAGON_S2_asr_i_p_or + +/* ========================================================================== + Assembly Syntax: Rdd32=asr(Rss32,#u6):rnd + C Intrinsic Prototype: Word64 Q6_P_asr_PI_rnd(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asr_PI_rnd __builtin_HEXAGON_S2_asr_i_p_rnd + +/* ========================================================================== + Assembly Syntax: Rdd32=asrrnd(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_asrrnd_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_asrrnd_PI __builtin_HEXAGON_S2_asr_i_p_rnd_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rd32=asr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asr_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asr_RI __builtin_HEXAGON_S2_asr_i_r + +/* ========================================================================== + Assembly Syntax: Rx32+=asr(Rs32,#u5) + C Intrinsic 
Prototype: Word32 Q6_R_asracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asracc_RI __builtin_HEXAGON_S2_asr_i_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=asr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asrand_RI __builtin_HEXAGON_S2_asr_i_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=asr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asrnac_RI __builtin_HEXAGON_S2_asr_i_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=asr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asror_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asror_RI __builtin_HEXAGON_S2_asr_i_r_or + +/* ========================================================================== + Assembly Syntax: Rd32=asr(Rs32,#u5):rnd + C Intrinsic Prototype: Word32 Q6_R_asr_RI_rnd(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asr_RI_rnd __builtin_HEXAGON_S2_asr_i_r_rnd + +/* ========================================================================== + Assembly Syntax: Rd32=asrrnd(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_asrrnd_RI(Word32 Rs, 
Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_asrrnd_RI __builtin_HEXAGON_S2_asr_i_r_rnd_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rd32=vasrw(Rss32,#u5) + C Intrinsic Prototype: Word32 Q6_R_vasrw_PI(Word64 Rss, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vasrw_PI __builtin_HEXAGON_S2_asr_i_svw_trun + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrh(Rss32,#u4) + C Intrinsic Prototype: Word64 Q6_P_vasrh_PI(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vasrh_PI __builtin_HEXAGON_S2_asr_i_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrw(Rss32,#u5) + C Intrinsic Prototype: Word64 Q6_P_vasrw_PI(Word64 Rss, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vasrw_PI __builtin_HEXAGON_S2_asr_i_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asr_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asr_PR __builtin_HEXAGON_S2_asr_r_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_asracc_PR __builtin_HEXAGON_S2_asr_r_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asrand_PR __builtin_HEXAGON_S2_asr_r_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asrnac_PR __builtin_HEXAGON_S2_asr_r_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asror_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asror_PR __builtin_HEXAGON_S2_asr_r_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=asr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_asrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_asrxacc_PR __builtin_HEXAGON_S2_asr_r_p_xor + +/* ========================================================================== + Assembly Syntax: Rd32=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asr_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_asr_RR __builtin_HEXAGON_S2_asr_r_r + +/* ========================================================================== + Assembly Syntax: Rx32+=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asracc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asracc_RR __builtin_HEXAGON_S2_asr_r_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asrand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asrand_RR __builtin_HEXAGON_S2_asr_r_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asrnac_RR __builtin_HEXAGON_S2_asr_r_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=asr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_asror_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_asror_RR __builtin_HEXAGON_S2_asr_r_r_or + +/* ========================================================================== + Assembly Syntax: Rd32=asr(Rs32,Rt32):sat + C Intrinsic Prototype: Word32 Q6_R_asr_RR_sat(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_asr_RR_sat __builtin_HEXAGON_S2_asr_r_r_sat + +/* ========================================================================== + Assembly Syntax: Rd32=vasrw(Rss32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_vasrw_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vasrw_PR __builtin_HEXAGON_S2_asr_r_svw_trun + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vasrh_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vasrh_PR __builtin_HEXAGON_S2_asr_r_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrw(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vasrw_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vasrw_PR __builtin_HEXAGON_S2_asr_r_vw + +/* ========================================================================== + Assembly Syntax: Rd32=brev(Rs32) + C Intrinsic Prototype: Word32 Q6_R_brev_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_brev_R __builtin_HEXAGON_S2_brev + +/* ========================================================================== + Assembly Syntax: Rdd32=brev(Rss32) + C Intrinsic Prototype: Word64 Q6_P_brev_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_brev_P __builtin_HEXAGON_S2_brevp + +/* 
========================================================================== + Assembly Syntax: Rd32=cl0(Rs32) + C Intrinsic Prototype: Word32 Q6_R_cl0_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cl0_R __builtin_HEXAGON_S2_cl0 + +/* ========================================================================== + Assembly Syntax: Rd32=cl0(Rss32) + C Intrinsic Prototype: Word32 Q6_R_cl0_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cl0_P __builtin_HEXAGON_S2_cl0p + +/* ========================================================================== + Assembly Syntax: Rd32=cl1(Rs32) + C Intrinsic Prototype: Word32 Q6_R_cl1_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cl1_R __builtin_HEXAGON_S2_cl1 + +/* ========================================================================== + Assembly Syntax: Rd32=cl1(Rss32) + C Intrinsic Prototype: Word32 Q6_R_cl1_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_cl1_P __builtin_HEXAGON_S2_cl1p + +/* ========================================================================== + Assembly Syntax: Rd32=clb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_clb_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_clb_R __builtin_HEXAGON_S2_clb + +/* ========================================================================== + Assembly Syntax: Rd32=normamt(Rs32) + C Intrinsic Prototype: Word32 Q6_R_normamt_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_normamt_R __builtin_HEXAGON_S2_clbnorm + +/* ========================================================================== + Assembly Syntax: Rd32=clb(Rss32) + C Intrinsic Prototype: Word32 Q6_R_clb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_clb_P __builtin_HEXAGON_S2_clbp + +/* ========================================================================== + Assembly Syntax: Rd32=clrbit(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_clrbit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_clrbit_RI __builtin_HEXAGON_S2_clrbit_i + +/* ========================================================================== + Assembly Syntax: Rd32=clrbit(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_clrbit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_clrbit_RR __builtin_HEXAGON_S2_clrbit_r + +/* ========================================================================== + Assembly Syntax: Rd32=ct0(Rs32) + C Intrinsic Prototype: Word32 Q6_R_ct0_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_ct0_R __builtin_HEXAGON_S2_ct0 + +/* ========================================================================== + Assembly Syntax: Rd32=ct0(Rss32) + C Intrinsic Prototype: Word32 Q6_R_ct0_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_ct0_P __builtin_HEXAGON_S2_ct0p + +/* 
========================================================================== + Assembly Syntax: Rd32=ct1(Rs32) + C Intrinsic Prototype: Word32 Q6_R_ct1_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_ct1_R __builtin_HEXAGON_S2_ct1 + +/* ========================================================================== + Assembly Syntax: Rd32=ct1(Rss32) + C Intrinsic Prototype: Word32 Q6_R_ct1_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_ct1_P __builtin_HEXAGON_S2_ct1p + +/* ========================================================================== + Assembly Syntax: Rdd32=deinterleave(Rss32) + C Intrinsic Prototype: Word64 Q6_P_deinterleave_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_deinterleave_P __builtin_HEXAGON_S2_deinterleave + +/* ========================================================================== + Assembly Syntax: Rd32=extractu(Rs32,#u5,#U5) + C Intrinsic Prototype: Word32 Q6_R_extractu_RII(Word32 Rs, Word32 Iu5, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_extractu_RII __builtin_HEXAGON_S2_extractu + +/* ========================================================================== + Assembly Syntax: Rd32=extractu(Rs32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_extractu_RP(Word32 Rs, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_extractu_RP __builtin_HEXAGON_S2_extractu_rp + +/* ========================================================================== + Assembly Syntax: 
Rdd32=extractu(Rss32,#u6,#U6) + C Intrinsic Prototype: Word64 Q6_P_extractu_PII(Word64 Rss, Word32 Iu6, Word32 IU6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_extractu_PII __builtin_HEXAGON_S2_extractup + +/* ========================================================================== + Assembly Syntax: Rdd32=extractu(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_extractu_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_extractu_PP __builtin_HEXAGON_S2_extractup_rp + +/* ========================================================================== + Assembly Syntax: Rx32=insert(Rs32,#u5,#U5) + C Intrinsic Prototype: Word32 Q6_R_insert_RII(Word32 Rx, Word32 Rs, Word32 Iu5, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_insert_RII __builtin_HEXAGON_S2_insert + +/* ========================================================================== + Assembly Syntax: Rx32=insert(Rs32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_insert_RP(Word32 Rx, Word32 Rs, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_insert_RP __builtin_HEXAGON_S2_insert_rp + +/* ========================================================================== + Assembly Syntax: Rxx32=insert(Rss32,#u6,#U6) + C Intrinsic Prototype: Word64 Q6_P_insert_PII(Word64 Rxx, Word64 Rss, Word32 Iu6, Word32 IU6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_insert_PII __builtin_HEXAGON_S2_insertp + +/* ========================================================================== + 
Assembly Syntax: Rxx32=insert(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_insert_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_insert_PP __builtin_HEXAGON_S2_insertp_rp + +/* ========================================================================== + Assembly Syntax: Rdd32=interleave(Rss32) + C Intrinsic Prototype: Word64 Q6_P_interleave_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_interleave_P __builtin_HEXAGON_S2_interleave + +/* ========================================================================== + Assembly Syntax: Rdd32=lfs(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_lfs_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lfs_PP __builtin_HEXAGON_S2_lfsp + +/* ========================================================================== + Assembly Syntax: Rdd32=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsl_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsl_PR __builtin_HEXAGON_S2_lsl_r_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lslacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lslacc_PR __builtin_HEXAGON_S2_lsl_r_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 
Q6_P_lsland_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsland_PR __builtin_HEXAGON_S2_lsl_r_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lslnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lslnac_PR __builtin_HEXAGON_S2_lsl_r_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lslor_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lslor_PR __builtin_HEXAGON_S2_lsl_r_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=lsl(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lslxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lslxacc_PR __builtin_HEXAGON_S2_lsl_r_p_xor + +/* ========================================================================== + Assembly Syntax: Rd32=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsl_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsl_RR __builtin_HEXAGON_S2_lsl_r_r + +/* ========================================================================== + Assembly Syntax: Rx32+=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lslacc_RR(Word32 Rx, Word32 Rs, Word32 Rt) 
+ Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lslacc_RR __builtin_HEXAGON_S2_lsl_r_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsland_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsland_RR __builtin_HEXAGON_S2_lsl_r_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lslnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lslnac_RR __builtin_HEXAGON_S2_lsl_r_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=lsl(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lslor_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lslor_RR __builtin_HEXAGON_S2_lsl_r_r_or + +/* ========================================================================== + Assembly Syntax: Rdd32=vlslh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vlslh_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vlslh_PR __builtin_HEXAGON_S2_lsl_r_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vlslw(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vlslw_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vlslw_PR __builtin_HEXAGON_S2_lsl_r_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=lsr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_lsr_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsr_PI __builtin_HEXAGON_S2_lsr_i_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=lsr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_lsracc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsracc_PI __builtin_HEXAGON_S2_lsr_i_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=lsr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_lsrand_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsrand_PI __builtin_HEXAGON_S2_lsr_i_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=lsr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_lsrnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsrnac_PI __builtin_HEXAGON_S2_lsr_i_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=lsr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_lsror_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_lsror_PI __builtin_HEXAGON_S2_lsr_i_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=lsr(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsrxacc_PI __builtin_HEXAGON_S2_lsr_i_p_xacc + +/* ========================================================================== + Assembly Syntax: Rd32=lsr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_lsr_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsr_RI __builtin_HEXAGON_S2_lsr_i_r + +/* ========================================================================== + Assembly Syntax: Rx32+=lsr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_lsracc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsracc_RI __builtin_HEXAGON_S2_lsr_i_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=lsr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_lsrand_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsrand_RI __builtin_HEXAGON_S2_lsr_i_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=lsr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_lsrnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_lsrnac_RI __builtin_HEXAGON_S2_lsr_i_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=lsr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_lsror_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsror_RI __builtin_HEXAGON_S2_lsr_i_r_or + +/* ========================================================================== + Assembly Syntax: Rx32^=lsr(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_lsrxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsrxacc_RI __builtin_HEXAGON_S2_lsr_i_r_xacc + +/* ========================================================================== + Assembly Syntax: Rdd32=vlsrh(Rss32,#u4) + C Intrinsic Prototype: Word64 Q6_P_vlsrh_PI(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vlsrh_PI __builtin_HEXAGON_S2_lsr_i_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vlsrw(Rss32,#u5) + C Intrinsic Prototype: Word64 Q6_P_vlsrw_PI(Word64 Rss, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vlsrw_PI __builtin_HEXAGON_S2_lsr_i_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=lsr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsr_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== 
*/ + +#define Q6_P_lsr_PR __builtin_HEXAGON_S2_lsr_r_p + +/* ========================================================================== + Assembly Syntax: Rxx32+=lsr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsracc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsracc_PR __builtin_HEXAGON_S2_lsr_r_p_acc + +/* ========================================================================== + Assembly Syntax: Rxx32&=lsr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsrand_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsrand_PR __builtin_HEXAGON_S2_lsr_r_p_and + +/* ========================================================================== + Assembly Syntax: Rxx32-=lsr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsrnac_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsrnac_PR __builtin_HEXAGON_S2_lsr_r_p_nac + +/* ========================================================================== + Assembly Syntax: Rxx32|=lsr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsror_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_lsror_PR __builtin_HEXAGON_S2_lsr_r_p_or + +/* ========================================================================== + Assembly Syntax: Rxx32^=lsr(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_lsrxacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_P_lsrxacc_PR __builtin_HEXAGON_S2_lsr_r_p_xor + +/* ========================================================================== + Assembly Syntax: Rd32=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsr_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsr_RR __builtin_HEXAGON_S2_lsr_r_r + +/* ========================================================================== + Assembly Syntax: Rx32+=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsracc_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsracc_RR __builtin_HEXAGON_S2_lsr_r_r_acc + +/* ========================================================================== + Assembly Syntax: Rx32&=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsrand_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsrand_RR __builtin_HEXAGON_S2_lsr_r_r_and + +/* ========================================================================== + Assembly Syntax: Rx32-=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsrnac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsrnac_RR __builtin_HEXAGON_S2_lsr_r_r_nac + +/* ========================================================================== + Assembly Syntax: Rx32|=lsr(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsror_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsror_RR __builtin_HEXAGON_S2_lsr_r_r_or + +/* 
========================================================================== + Assembly Syntax: Rdd32=vlsrh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vlsrh_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vlsrh_PR __builtin_HEXAGON_S2_lsr_r_vh + +/* ========================================================================== + Assembly Syntax: Rdd32=vlsrw(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vlsrw_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vlsrw_PR __builtin_HEXAGON_S2_lsr_r_vw + +/* ========================================================================== + Assembly Syntax: Rdd32=packhl(Rs32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_packhl_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU32_3op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_packhl_RR __builtin_HEXAGON_S2_packhl + +/* ========================================================================== + Assembly Syntax: Rd32=parity(Rss32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_parity_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_parity_PP __builtin_HEXAGON_S2_parityp + +/* ========================================================================== + Assembly Syntax: Rd32=setbit(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_setbit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_setbit_RI __builtin_HEXAGON_S2_setbit_i + +/* ========================================================================== + Assembly 
Syntax: Rd32=setbit(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_setbit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_setbit_RR __builtin_HEXAGON_S2_setbit_r + +/* ========================================================================== + Assembly Syntax: Rdd32=shuffeb(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_shuffeb_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_shuffeb_PP __builtin_HEXAGON_S2_shuffeb + +/* ========================================================================== + Assembly Syntax: Rdd32=shuffeh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_shuffeh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_shuffeh_PP __builtin_HEXAGON_S2_shuffeh + +/* ========================================================================== + Assembly Syntax: Rdd32=shuffob(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_shuffob_PP(Word64 Rtt, Word64 Rss) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_shuffob_PP __builtin_HEXAGON_S2_shuffob + +/* ========================================================================== + Assembly Syntax: Rdd32=shuffoh(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_shuffoh_PP(Word64 Rtt, Word64 Rss) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_shuffoh_PP __builtin_HEXAGON_S2_shuffoh + +/* ========================================================================== + Assembly Syntax: memb(Rx32++#s4:0:circ(Mu2))=Rt32 + C Intrinsic Prototype: void 
Q6_memb_IMR_circ(void** Rx, Word32 Is4_0, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memb_IMR_circ __builtin_HEXAGON_S2_storerb_pci + +/* ========================================================================== + Assembly Syntax: memb(Rx32++I:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memb_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memb_MR_circ __builtin_HEXAGON_S2_storerb_pcr + +/* ========================================================================== + Assembly Syntax: memd(Rx32++#s4:3:circ(Mu2))=Rtt32 + C Intrinsic Prototype: void Q6_memd_IMP_circ(void** Rx, Word32 Is4_3, Word32 Mu, Word64 Rtt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memd_IMP_circ __builtin_HEXAGON_S2_storerd_pci + +/* ========================================================================== + Assembly Syntax: memd(Rx32++I:circ(Mu2))=Rtt32 + C Intrinsic Prototype: void Q6_memd_MP_circ(void** Rx, Word32 Mu, Word64 Rtt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memd_MP_circ __builtin_HEXAGON_S2_storerd_pcr + +/* ========================================================================== + Assembly Syntax: memh(Rx32++#s4:1:circ(Mu2))=Rt32.h + C Intrinsic Prototype: void Q6_memh_IMRh_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memh_IMRh_circ __builtin_HEXAGON_S2_storerf_pci + +/* 
========================================================================== + Assembly Syntax: memh(Rx32++I:circ(Mu2))=Rt32.h + C Intrinsic Prototype: void Q6_memh_MRh_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memh_MRh_circ __builtin_HEXAGON_S2_storerf_pcr + +/* ========================================================================== + Assembly Syntax: memh(Rx32++#s4:1:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memh_IMR_circ(void** Rx, Word32 Is4_1, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memh_IMR_circ __builtin_HEXAGON_S2_storerh_pci + +/* ========================================================================== + Assembly Syntax: memh(Rx32++I:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memh_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memh_MR_circ __builtin_HEXAGON_S2_storerh_pcr + +/* ========================================================================== + Assembly Syntax: memw(Rx32++#s4:2:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memw_IMR_circ(void** Rx, Word32 Is4_2, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_memw_IMR_circ __builtin_HEXAGON_S2_storeri_pci + +/* ========================================================================== + Assembly Syntax: memw(Rx32++I:circ(Mu2))=Rt32 + C Intrinsic Prototype: void Q6_memw_MR_circ(void** Rx, Word32 Mu, Word32 Rt, void* BaseAddress) + Instruction Type: ST + Execution Slots: SLOT01 + 
========================================================================== */ + +#define Q6_memw_MR_circ __builtin_HEXAGON_S2_storeri_pcr + +/* ========================================================================== + Assembly Syntax: Rd32=vsathb(Rs32) + C Intrinsic Prototype: Word32 Q6_R_vsathb_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsathb_R __builtin_HEXAGON_S2_svsathb + +/* ========================================================================== + Assembly Syntax: Rd32=vsathub(Rs32) + C Intrinsic Prototype: Word32 Q6_R_vsathub_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsathub_R __builtin_HEXAGON_S2_svsathub + +/* ========================================================================== + Assembly Syntax: Rx32=tableidxb(Rs32,#u4,#U5) + C Intrinsic Prototype: Word32 Q6_R_tableidxb_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_tableidxb_RII __builtin_HEXAGON_S2_tableidxb_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rx32=tableidxd(Rs32,#u4,#U5) + C Intrinsic Prototype: Word32 Q6_R_tableidxd_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_tableidxd_RII __builtin_HEXAGON_S2_tableidxd_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rx32=tableidxh(Rs32,#u4,#U5) + C Intrinsic Prototype: Word32 Q6_R_tableidxh_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5) + Instruction Type: S_2op + Execution Slots: 
SLOT0123 + ========================================================================== */ + +#define Q6_R_tableidxh_RII __builtin_HEXAGON_S2_tableidxh_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rx32=tableidxw(Rs32,#u4,#U5) + C Intrinsic Prototype: Word32 Q6_R_tableidxw_RII(Word32 Rx, Word32 Rs, Word32 Iu4, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_tableidxw_RII __builtin_HEXAGON_S2_tableidxw_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rd32=togglebit(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_togglebit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_togglebit_RI __builtin_HEXAGON_S2_togglebit_i + +/* ========================================================================== + Assembly Syntax: Rd32=togglebit(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_togglebit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_togglebit_RR __builtin_HEXAGON_S2_togglebit_r + +/* ========================================================================== + Assembly Syntax: Pd4=tstbit(Rs32,#u5) + C Intrinsic Prototype: Byte Q6_p_tstbit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_tstbit_RI __builtin_HEXAGON_S2_tstbit_i + +/* ========================================================================== + Assembly Syntax: Pd4=tstbit(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_tstbit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_p_tstbit_RR __builtin_HEXAGON_S2_tstbit_r + +/* ========================================================================== + Assembly Syntax: Rdd32=valignb(Rtt32,Rss32,#u3) + C Intrinsic Prototype: Word64 Q6_P_valignb_PPI(Word64 Rtt, Word64 Rss, Word32 Iu3) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_valignb_PPI __builtin_HEXAGON_S2_valignib + +/* ========================================================================== + Assembly Syntax: Rdd32=valignb(Rtt32,Rss32,Pu4) + C Intrinsic Prototype: Word64 Q6_P_valignb_PPp(Word64 Rtt, Word64 Rss, Byte Pu) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_valignb_PPp __builtin_HEXAGON_S2_valignrb + +/* ========================================================================== + Assembly Syntax: Rdd32=vcnegh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vcnegh_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcnegh_PR __builtin_HEXAGON_S2_vcnegh + +/* ========================================================================== + Assembly Syntax: Rdd32=vcrotate(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vcrotate_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vcrotate_PR __builtin_HEXAGON_S2_vcrotate + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcnegh(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_vrcneghacc_PR(Word64 Rxx, Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vrcneghacc_PR __builtin_HEXAGON_S2_vrcnegh + +/* ========================================================================== + Assembly Syntax: Rd32=vrndwh(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vrndwh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vrndwh_P __builtin_HEXAGON_S2_vrndpackwh + +/* ========================================================================== + Assembly Syntax: Rd32=vrndwh(Rss32):sat + C Intrinsic Prototype: Word32 Q6_R_vrndwh_P_sat(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vrndwh_P_sat __builtin_HEXAGON_S2_vrndpackwhs + +/* ========================================================================== + Assembly Syntax: Rd32=vsathb(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vsathb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsathb_P __builtin_HEXAGON_S2_vsathb + +/* ========================================================================== + Assembly Syntax: Rdd32=vsathb(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsathb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsathb_P __builtin_HEXAGON_S2_vsathb_nopack + +/* ========================================================================== + Assembly Syntax: Rd32=vsathub(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vsathub_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsathub_P __builtin_HEXAGON_S2_vsathub + +/* 
========================================================================== + Assembly Syntax: Rdd32=vsathub(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsathub_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsathub_P __builtin_HEXAGON_S2_vsathub_nopack + +/* ========================================================================== + Assembly Syntax: Rd32=vsatwh(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vsatwh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsatwh_P __builtin_HEXAGON_S2_vsatwh + +/* ========================================================================== + Assembly Syntax: Rdd32=vsatwh(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsatwh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsatwh_P __builtin_HEXAGON_S2_vsatwh_nopack + +/* ========================================================================== + Assembly Syntax: Rd32=vsatwuh(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vsatwuh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh + +/* ========================================================================== + Assembly Syntax: Rdd32=vsatwuh(Rss32) + C Intrinsic Prototype: Word64 Q6_P_vsatwuh_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsatwuh_P __builtin_HEXAGON_S2_vsatwuh_nopack + +/* ========================================================================== + Assembly Syntax: Rd32=vsplatb(Rs32) + C Intrinsic Prototype: Word32 
Q6_R_vsplatb_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vsplatb_R __builtin_HEXAGON_S2_vsplatrb + +/* ========================================================================== + Assembly Syntax: Rdd32=vsplath(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vsplath_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsplath_R __builtin_HEXAGON_S2_vsplatrh + +/* ========================================================================== + Assembly Syntax: Rdd32=vspliceb(Rss32,Rtt32,#u3) + C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPI(Word64 Rss, Word64 Rtt, Word32 Iu3) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vspliceb_PPI __builtin_HEXAGON_S2_vspliceib + +/* ========================================================================== + Assembly Syntax: Rdd32=vspliceb(Rss32,Rtt32,Pu4) + C Intrinsic Prototype: Word64 Q6_P_vspliceb_PPp(Word64 Rss, Word64 Rtt, Byte Pu) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vspliceb_PPp __builtin_HEXAGON_S2_vsplicerb + +/* ========================================================================== + Assembly Syntax: Rdd32=vsxtbh(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vsxtbh_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsxtbh_R __builtin_HEXAGON_S2_vsxtbh + +/* ========================================================================== + Assembly Syntax: Rdd32=vsxthw(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vsxthw_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vsxthw_R __builtin_HEXAGON_S2_vsxthw + +/* ========================================================================== + Assembly Syntax: Rd32=vtrunehb(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vtrunehb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vtrunehb_P __builtin_HEXAGON_S2_vtrunehb + +/* ========================================================================== + Assembly Syntax: Rdd32=vtrunewh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vtrunewh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vtrunewh_PP __builtin_HEXAGON_S2_vtrunewh + +/* ========================================================================== + Assembly Syntax: Rd32=vtrunohb(Rss32) + C Intrinsic Prototype: Word32 Q6_R_vtrunohb_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vtrunohb_P __builtin_HEXAGON_S2_vtrunohb + +/* ========================================================================== + Assembly Syntax: Rdd32=vtrunowh(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vtrunowh_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vtrunowh_PP __builtin_HEXAGON_S2_vtrunowh + +/* ========================================================================== + Assembly Syntax: Rdd32=vzxtbh(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vzxtbh_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vzxtbh_R 
__builtin_HEXAGON_S2_vzxtbh + +/* ========================================================================== + Assembly Syntax: Rdd32=vzxthw(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vzxthw_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vzxthw_R __builtin_HEXAGON_S2_vzxthw + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,add(Ru32,#s6)) + C Intrinsic Prototype: Word32 Q6_R_add_add_RRI(Word32 Rs, Word32 Ru, Word32 Is6) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_add_RRI __builtin_HEXAGON_S4_addaddi + +/* ========================================================================== + Assembly Syntax: Rx32=add(#u8,asl(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_add_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_asl_IRI __builtin_HEXAGON_S4_addi_asl_ri + +/* ========================================================================== + Assembly Syntax: Rx32=add(#u8,lsr(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_add_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_lsr_IRI __builtin_HEXAGON_S4_addi_lsr_ri + +/* ========================================================================== + Assembly Syntax: Rx32=and(#u8,asl(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_and_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_and_asl_IRI 
__builtin_HEXAGON_S4_andi_asl_ri + +/* ========================================================================== + Assembly Syntax: Rx32=and(#u8,lsr(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_and_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_and_lsr_IRI __builtin_HEXAGON_S4_andi_lsr_ri + +/* ========================================================================== + Assembly Syntax: Rd32=add(clb(Rs32),#s6) + C Intrinsic Prototype: Word32 Q6_R_add_clb_RI(Word32 Rs, Word32 Is6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_clb_RI __builtin_HEXAGON_S4_clbaddi + +/* ========================================================================== + Assembly Syntax: Rd32=add(clb(Rss32),#s6) + C Intrinsic Prototype: Word32 Q6_R_add_clb_PI(Word64 Rss, Word32 Is6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_clb_PI __builtin_HEXAGON_S4_clbpaddi + +/* ========================================================================== + Assembly Syntax: Rd32=normamt(Rss32) + C Intrinsic Prototype: Word32 Q6_R_normamt_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_normamt_P __builtin_HEXAGON_S4_clbpnorm + +/* ========================================================================== + Assembly Syntax: Rd32=extract(Rs32,#u5,#U5) + C Intrinsic Prototype: Word32 Q6_R_extract_RII(Word32 Rs, Word32 Iu5, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_extract_RII __builtin_HEXAGON_S4_extract + +/* 
========================================================================== + Assembly Syntax: Rd32=extract(Rs32,Rtt32) + C Intrinsic Prototype: Word32 Q6_R_extract_RP(Word32 Rs, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_extract_RP __builtin_HEXAGON_S4_extract_rp + +/* ========================================================================== + Assembly Syntax: Rdd32=extract(Rss32,#u6,#U6) + C Intrinsic Prototype: Word64 Q6_P_extract_PII(Word64 Rss, Word32 Iu6, Word32 IU6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_extract_PII __builtin_HEXAGON_S4_extractp + +/* ========================================================================== + Assembly Syntax: Rdd32=extract(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_extract_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_extract_PP __builtin_HEXAGON_S4_extractp_rp + +/* ========================================================================== + Assembly Syntax: Rd32=lsl(#s6,Rt32) + C Intrinsic Prototype: Word32 Q6_R_lsl_IR(Word32 Is6, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_lsl_IR __builtin_HEXAGON_S4_lsli + +/* ========================================================================== + Assembly Syntax: Pd4=!tstbit(Rs32,#u5) + C Intrinsic Prototype: Byte Q6_p_not_tstbit_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_tstbit_RI __builtin_HEXAGON_S4_ntstbit_i + +/* 
========================================================================== + Assembly Syntax: Pd4=!tstbit(Rs32,Rt32) + C Intrinsic Prototype: Byte Q6_p_not_tstbit_RR(Word32 Rs, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_tstbit_RR __builtin_HEXAGON_S4_ntstbit_r + +/* ========================================================================== + Assembly Syntax: Rx32|=and(Rs32,#s10) + C Intrinsic Prototype: Word32 Q6_R_andor_RI(Word32 Rx, Word32 Rs, Word32 Is10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_andor_RI __builtin_HEXAGON_S4_or_andi + +/* ========================================================================== + Assembly Syntax: Rx32=or(Ru32,and(Rx32,#s10)) + C Intrinsic Prototype: Word32 Q6_R_or_and_RRI(Word32 Ru, Word32 Rx, Word32 Is10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_or_and_RRI __builtin_HEXAGON_S4_or_andix + +/* ========================================================================== + Assembly Syntax: Rx32|=or(Rs32,#s10) + C Intrinsic Prototype: Word32 Q6_R_oror_RI(Word32 Rx, Word32 Rs, Word32 Is10) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_oror_RI __builtin_HEXAGON_S4_or_ori + +/* ========================================================================== + Assembly Syntax: Rx32=or(#u8,asl(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_or_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_or_asl_IRI __builtin_HEXAGON_S4_ori_asl_ri + +/* 
========================================================================== + Assembly Syntax: Rx32=or(#u8,lsr(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_or_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_or_lsr_IRI __builtin_HEXAGON_S4_ori_lsr_ri + +/* ========================================================================== + Assembly Syntax: Rd32=parity(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_parity_RR(Word32 Rs, Word32 Rt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_parity_RR __builtin_HEXAGON_S4_parity + +/* ========================================================================== + Assembly Syntax: Rd32=add(Rs32,sub(#s6,Ru32)) + C Intrinsic Prototype: Word32 Q6_R_add_sub_RIR(Word32 Rs, Word32 Is6, Word32 Ru) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_add_sub_RIR __builtin_HEXAGON_S4_subaddi + +/* ========================================================================== + Assembly Syntax: Rx32=sub(#u8,asl(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_sub_asl_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_asl_IRI __builtin_HEXAGON_S4_subi_asl_ri + +/* ========================================================================== + Assembly Syntax: Rx32=sub(#u8,lsr(Rx32,#U5)) + C Intrinsic Prototype: Word32 Q6_R_sub_lsr_IRI(Word32 Iu8, Word32 Rx, Word32 IU5) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_sub_lsr_IRI __builtin_HEXAGON_S4_subi_lsr_ri + +/* 
========================================================================== + Assembly Syntax: Rdd32=vrcrotate(Rss32,Rt32,#u2) + C Intrinsic Prototype: Word64 Q6_P_vrcrotate_PRI(Word64 Rss, Word32 Rt, Word32 Iu2) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcrotate_PRI __builtin_HEXAGON_S4_vrcrotate + +/* ========================================================================== + Assembly Syntax: Rxx32+=vrcrotate(Rss32,Rt32,#u2) + C Intrinsic Prototype: Word64 Q6_P_vrcrotateacc_PRI(Word64 Rxx, Word64 Rss, Word32 Rt, Word32 Iu2) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vrcrotateacc_PRI __builtin_HEXAGON_S4_vrcrotate_acc + +/* ========================================================================== + Assembly Syntax: Rdd32=vxaddsubh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxaddsubh_PP_sat __builtin_HEXAGON_S4_vxaddsubh + +/* ========================================================================== + Assembly Syntax: Rdd32=vxaddsubh(Rss32,Rtt32):rnd:>>1:sat + C Intrinsic Prototype: Word64 Q6_P_vxaddsubh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxaddsubh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxaddsubhr + +/* ========================================================================== + Assembly Syntax: Rdd32=vxaddsubw(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vxaddsubw_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_vxaddsubw_PP_sat __builtin_HEXAGON_S4_vxaddsubw + +/* ========================================================================== + Assembly Syntax: Rdd32=vxsubaddh(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxsubaddh_PP_sat __builtin_HEXAGON_S4_vxsubaddh + +/* ========================================================================== + Assembly Syntax: Rdd32=vxsubaddh(Rss32,Rtt32):rnd:>>1:sat + C Intrinsic Prototype: Word64 Q6_P_vxsubaddh_PP_rnd_rs1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxsubaddh_PP_rnd_rs1_sat __builtin_HEXAGON_S4_vxsubaddhr + +/* ========================================================================== + Assembly Syntax: Rdd32=vxsubaddw(Rss32,Rtt32):sat + C Intrinsic Prototype: Word64 Q6_P_vxsubaddw_PP_sat(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vxsubaddw_PP_sat __builtin_HEXAGON_S4_vxsubaddw + +/* ========================================================================== + Assembly Syntax: Rd32=vasrhub(Rss32,#u4):rnd:sat + C Intrinsic Prototype: Word32 Q6_R_vasrhub_PI_rnd_sat(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_R_vasrhub_PI_rnd_sat __builtin_HEXAGON_S5_asrhub_rnd_sat_goodsyntax + +/* ========================================================================== + Assembly Syntax: Rd32=vasrhub(Rss32,#u4):sat + C Intrinsic Prototype: Word32 
Q6_R_vasrhub_PI_sat(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_vasrhub_PI_sat __builtin_HEXAGON_S5_asrhub_sat + +/* ========================================================================== + Assembly Syntax: Rd32=popcount(Rss32) + C Intrinsic Prototype: Word32 Q6_R_popcount_P(Word64 Rss) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_popcount_P __builtin_HEXAGON_S5_popcountp + +/* ========================================================================== + Assembly Syntax: Rdd32=vasrh(Rss32,#u4):rnd + C Intrinsic Prototype: Word64 Q6_P_vasrh_PI_rnd(Word64 Rss, Word32 Iu4) + Instruction Type: S_2op + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_P_vasrh_PI_rnd __builtin_HEXAGON_S5_vasrhrnd_goodsyntax + +/* ========================================================================== + Assembly Syntax: dccleana(Rs32) + C Intrinsic Prototype: void Q6_dccleana_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dccleana_A __builtin_HEXAGON_Y2_dccleana + +/* ========================================================================== + Assembly Syntax: dccleaninva(Rs32) + C Intrinsic Prototype: void Q6_dccleaninva_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dccleaninva_A __builtin_HEXAGON_Y2_dccleaninva + +/* ========================================================================== + Assembly Syntax: dcfetch(Rs32) + C Intrinsic Prototype: void Q6_dcfetch_A(Address Rs) + Instruction Type: MAPPING + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_dcfetch_A __builtin_HEXAGON_Y2_dcfetch + +/* ========================================================================== + Assembly Syntax: dcinva(Rs32) + C Intrinsic Prototype: void Q6_dcinva_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dcinva_A __builtin_HEXAGON_Y2_dcinva + +/* ========================================================================== + Assembly Syntax: dczeroa(Rs32) + C Intrinsic Prototype: void Q6_dczeroa_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dczeroa_A __builtin_HEXAGON_Y2_dczeroa + +/* ========================================================================== + Assembly Syntax: l2fetch(Rs32,Rt32) + C Intrinsic Prototype: void Q6_l2fetch_AR(Address Rs, Word32 Rt) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_l2fetch_AR __builtin_HEXAGON_Y4_l2fetch + +/* ========================================================================== + Assembly Syntax: l2fetch(Rs32,Rtt32) + C Intrinsic Prototype: void Q6_l2fetch_AP(Address Rs, Word64 Rtt) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_l2fetch_AP __builtin_HEXAGON_Y5_l2fetch + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rdd32=rol(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_rol_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_rol_PI __builtin_HEXAGON_S6_rol_i_p +#endif /* __HEXAGON_ARCH___ >= 
60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rxx32+=rol(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_rolacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_rolacc_PI __builtin_HEXAGON_S6_rol_i_p_acc +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rxx32&=rol(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_roland_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_roland_PI __builtin_HEXAGON_S6_rol_i_p_and +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rxx32-=rol(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_rolnac_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_rolnac_PI __builtin_HEXAGON_S6_rol_i_p_nac +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rxx32|=rol(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_rolor_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_rolor_PI __builtin_HEXAGON_S6_rol_i_p_or +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: 
Rxx32^=rol(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_rolxacc_PI(Word64 Rxx, Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_rolxacc_PI __builtin_HEXAGON_S6_rol_i_p_xacc +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rd32=rol(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_rol_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_rol_RI __builtin_HEXAGON_S6_rol_i_r +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rx32+=rol(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_rolacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_rolacc_RI __builtin_HEXAGON_S6_rol_i_r_acc +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rx32&=rol(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_roland_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_roland_RI __builtin_HEXAGON_S6_rol_i_r_and +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rx32-=rol(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_rolnac_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_rolnac_RI __builtin_HEXAGON_S6_rol_i_r_nac +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rx32|=rol(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_rolor_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_rolor_RI __builtin_HEXAGON_S6_rol_i_r_or +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rx32^=rol(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_rolxacc_RI(Word32 Rx, Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_rolxacc_RI __builtin_HEXAGON_S6_rol_i_r_xacc +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HEXAGON_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Rdd32=vabsdiffb(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsdiffb_PP(Word64 Rtt, Word64 Rss) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsdiffb_PP __builtin_HEXAGON_M6_vabsdiffb +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HEXAGON_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Rdd32=vabsdiffub(Rtt32,Rss32) + C Intrinsic Prototype: Word64 Q6_P_vabsdiffub_PP(Word64 Rtt, Word64 Rss) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vabsdiffub_PP __builtin_HEXAGON_M6_vabsdiffub +#endif /* __HEXAGON_ARCH___ 
>= 62 */ + +#if __HEXAGON_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Rdd32=vsplatb(Rs32) + C Intrinsic Prototype: Word64 Q6_P_vsplatb_R(Word32 Rs) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vsplatb_R __builtin_HEXAGON_S6_vsplatrbp +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HEXAGON_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Rdd32=vtrunehb(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vtrunehb_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vtrunehb_PP __builtin_HEXAGON_S6_vtrunehb_ppp +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HEXAGON_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Rdd32=vtrunohb(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vtrunohb_PP(Word64 Rss, Word64 Rtt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vtrunohb_PP __builtin_HEXAGON_S6_vtrunohb_ppp +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HEXAGON_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Pd4=!any8(vcmpb.eq(Rss32,Rtt32)) + C Intrinsic Prototype: Byte Q6_p_not_any8_vcmpb_eq_PP(Word64 Rss, Word64 Rtt) + Instruction Type: ALU64 + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_p_not_any8_vcmpb_eq_PP __builtin_HEXAGON_A6_vcmpbeq_notany +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HEXAGON_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: 
Rdd32=dfadd(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfadd_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfadd_PP __builtin_HEXAGON_F2_dfadd +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HEXAGON_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Rdd32=dfsub(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfsub_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfsub_PP __builtin_HEXAGON_F2_dfsub +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HEXAGON_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Rx32-=mpyi(Rs32,Rt32) + C Intrinsic Prototype: Word32 Q6_R_mpyinac_RR(Word32 Rx, Word32 Rs, Word32 Rt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mpyinac_RR __builtin_HEXAGON_M2_mnaci +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HEXAGON_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Rd32=mask(#u5,#U5) + C Intrinsic Prototype: Word32 Q6_R_mask_II(Word32 Iu5, Word32 IU5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_R_mask_II __builtin_HEXAGON_S2_mask +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=clip(Rs32,#u5) + C Intrinsic Prototype: Word32 Q6_R_clip_RI(Word32 Rs, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_R_clip_RI __builtin_HEXAGON_A7_clip +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cround(Rss32,#u6) + C Intrinsic Prototype: Word64 Q6_P_cround_PI(Word64 Rss, Word32 Iu6) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cround_PI __builtin_HEXAGON_A7_croundd_ri +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cround(Rss32,Rt32) + C Intrinsic Prototype: Word64 Q6_P_cround_PR(Word64 Rss, Word32 Rt) + Instruction Type: S_3op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_cround_PR __builtin_HEXAGON_A7_croundd_rr +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=vclip(Rss32,#u5) + C Intrinsic Prototype: Word64 Q6_P_vclip_PI(Word64 Rss, Word32 Iu5) + Instruction Type: S_2op + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_vclip_PI __builtin_HEXAGON_A7_vclip +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rdd32=dfmax(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmax_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_P_dfmax_PP __builtin_HEXAGON_F2_dfmax +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rdd32=dfmin(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmin_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmin_PP __builtin_HEXAGON_F2_dfmin +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rdd32=dfmpyfix(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmpyfix_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmpyfix_PP __builtin_HEXAGON_F2_dfmpyfix +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rxx32+=dfmpyhh(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmpyhhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmpyhhacc_PP __builtin_HEXAGON_F2_dfmpyhh +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rxx32+=dfmpylh(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmpylhacc_PP(Float64 Rxx, Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmpylhacc_PP __builtin_HEXAGON_F2_dfmpylh +#endif /* 
__HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 +/* ========================================================================== + Assembly Syntax: Rdd32=dfmpyll(Rss32,Rtt32) + C Intrinsic Prototype: Float64 Q6_P_dfmpyll_PP(Float64 Rss, Float64 Rtt) + Instruction Type: M + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_P_dfmpyll_PP __builtin_HEXAGON_F2_dfmpyll +#endif /* __HEXAGON_ARCH___ >= 67 */ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyiw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyiw_PP __builtin_HEXAGON_M7_dcmpyiw +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyiw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyiwacc_PP __builtin_HEXAGON_M7_dcmpyiw_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyiw(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_cmpyiw_PP_conj(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyiw_PP_conj __builtin_HEXAGON_M7_dcmpyiwc +#endif /* __HEXAGON_ARCH___ >= 67 && defined 
__HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyiw(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_cmpyiwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyiwacc_PP_conj __builtin_HEXAGON_M7_dcmpyiwc_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyrw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyrw_PP __builtin_HEXAGON_M7_dcmpyrw +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyrw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyrwacc_PP __builtin_HEXAGON_M7_dcmpyrw_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=cmpyrw(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_cmpyrw_PP_conj(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define 
Q6_P_cmpyrw_PP_conj __builtin_HEXAGON_M7_dcmpyrwc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=cmpyrw(Rss32,Rtt32*) + C Intrinsic Prototype: Word64 Q6_P_cmpyrwacc_PP_conj(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_cmpyrwacc_PP_conj __builtin_HEXAGON_M7_dcmpyrwc_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rdd32=vdmpyw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vdmpyw_PP(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_vdmpyw_PP __builtin_HEXAGON_M7_vdmpy +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rxx32+=vdmpyw(Rss32,Rtt32) + C Intrinsic Prototype: Word64 Q6_P_vdmpywacc_PP(Word64 Rxx, Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_P_vdmpywacc_PP __builtin_HEXAGON_M7_vdmpy_acc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + 
========================================================================== */ + +#define Q6_R_cmpyiw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyiw +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyiw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiw_rnd +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32*):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyiw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyiwc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyiw(Rss32,Rtt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyiw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyiw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyiwc_rnd +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: 
Rd32=cmpyrw(Rss32,Rtt32):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyrw_PP_s1_sat __builtin_HEXAGON_M7_wcmpyrw +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyrw_PP_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrw_rnd +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32*):<<1:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyrw_PP_conj_s1_sat __builtin_HEXAGON_M7_wcmpyrwc +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if __HEXAGON_ARCH__ >= 67 && defined __HEXAGON_AUDIO__ +/* ========================================================================== + Assembly Syntax: Rd32=cmpyrw(Rss32,Rtt32*):<<1:rnd:sat + C Intrinsic Prototype: Word32 Q6_R_cmpyrw_PP_conj_s1_rnd_sat(Word64 Rss, Word64 Rtt) + Instruction Type: M + Execution Slots: SLOT3 + ========================================================================== */ + +#define Q6_R_cmpyrw_PP_conj_s1_rnd_sat __builtin_HEXAGON_M7_wcmpyrwc_rnd +#endif /* __HEXAGON_ARCH___ >= 67 && defined __HEXAGON_AUDIO__*/ + +#if 
__HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: dmlink(Rs32,Rt32) + C Intrinsic Prototype: void Q6_dmlink_AA(Address Rs, Address Rt) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dmlink_AA __builtin_HEXAGON_Y6_dmlink +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Rd32=dmpause + C Intrinsic Prototype: Word32 Q6_R_dmpause() + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_R_dmpause __builtin_HEXAGON_Y6_dmpause +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Rd32=dmpoll + C Intrinsic Prototype: Word32 Q6_R_dmpoll() + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_R_dmpoll __builtin_HEXAGON_Y6_dmpoll +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: dmresume(Rs32) + C Intrinsic Prototype: void Q6_dmresume_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_dmresume_A __builtin_HEXAGON_Y6_dmresume +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: dmstart(Rs32) + C Intrinsic Prototype: void Q6_dmstart_A(Address Rs) + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define 
Q6_dmstart_A __builtin_HEXAGON_Y6_dmstart +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HEXAGON_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Rd32=dmwait + C Intrinsic Prototype: Word32 Q6_R_dmwait() + Instruction Type: ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_R_dmwait __builtin_HEXAGON_Y6_dmwait +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#include <hexagon_circ_brev_intrinsics.h> +#ifdef __HVX__ +#include <hvx_hexagon_protos.h> +#endif /* __HVX__ */ +#endif diff --git a/library/stdarch/crates/stdarch-gen-hexagon-scalar/src/main.rs b/library/stdarch/crates/stdarch-gen-hexagon-scalar/src/main.rs new file mode 100644 index 0000000000000..bbe28174ffa05 --- /dev/null +++ b/library/stdarch/crates/stdarch-gen-hexagon-scalar/src/main.rs @@ -0,0 +1,672 @@ +//! Hexagon Scalar Code Generator +//! +//! This generator creates scalar.rs from the LLVM `hexagon_protos.h` header file. +//! It parses the C intrinsic prototypes and generates Rust wrapper functions +//! with appropriate attributes for all scalar (non-HVX) Hexagon intrinsics. +//! +//! The generated module provides ~901 scalar intrinsic wrappers covering: +//! - Arithmetic, multiply, shift, saturate operations +//! - Compare, floating-point, and other scalar operations +//! +//! Intrinsics with `void*`/`void**` parameters (circular-addressing) are skipped +//! because they have no corresponding LLVM intrinsic. +//! +//! Usage: +//! cd crates/stdarch-gen-hexagon-scalar +//! cargo run +//! # Output is written to ../core_arch/src/hexagon/scalar.rs + +use regex::Regex; +use std::collections::HashMap; +use std::fs::File; +use std::io::Write; +use std::path::Path; + +/// Extract the instruction mnemonic from the assembly syntax string.
+/// +/// Examples: +/// - `Rd32=abs(Rs32)` → Some("abs") +/// - `Rd32=add(Rs32,Rt32):sat` → Some("add") +/// - `Rx32+=mpy(Rs32,Rt32)` → Some("mpy") +/// - `Rd32=dmpause` → Some("dmpause") +/// - `dmlink(Rs32,Rt32)` → Some("dmlink") +/// - `Rd32=Rs32` → None (simple transfer) +/// - `Rx32.h=#u16` → None (immediate load) +/// - `Rdd32=#s8` → None (immediate load) +fn extract_instr_name(asm_syntax: &str) -> Option<String> { + // Find the operator: +=, -=, or = + let after_op = if let Some(pos) = asm_syntax.find("+=") { + &asm_syntax[pos + 2..] + } else if let Some(pos) = asm_syntax.find("-=") { + &asm_syntax[pos + 2..] + } else if let Some(pos) = asm_syntax.find('=') { + &asm_syntax[pos + 1..] + } else { + // No assignment operator: try function-call-style syntax like "dmlink(Rs32,Rt32)". + // The mnemonic is the leading lowercase identifier. + return extract_leading_mnemonic(asm_syntax); + }; + + // After the operator, we expect a lowercase letter starting the mnemonic. + // Skip if it starts with uppercase (register name like Rs32) or # (immediate). + extract_leading_mnemonic(after_op) +} + +/// Extract a leading lowercase mnemonic from the given string. +/// +/// Returns `Some(mnemonic)` if the string starts with a lowercase ASCII letter, +/// collecting all subsequent alphanumeric/underscore characters. Returns `None` +/// if the string is empty or starts with an uppercase letter, `#`, etc.
+fn extract_leading_mnemonic(s: &str) -> Option<String> { + let first_char = s.chars().next()?; + if !first_char.is_ascii_lowercase() { + return None; + } + let mnemonic: String = s + .chars() + .take_while(|c| c.is_ascii_alphanumeric() || *c == '_') + .collect(); + if mnemonic.is_empty() { + None + } else { + Some(mnemonic) + } +} + +/// The tracking issue number for the stdarch_hexagon feature +const TRACKING_ISSUE: &str = "151523"; + +/// LLVM version the header file is from (for reference) +const LLVM_VERSION: &str = "22.1.0"; + +/// Local header file path (checked into the repository) +const HEADER_FILE: &str = "hexagon_protos.h"; + +/// Rust type representation for scalar intrinsics +#[derive(Debug, Clone, PartialEq)] +enum RustType { + I32, + I64, + F32, + F64, + Unit, +} + +impl RustType { + fn from_c_type(c_type: &str) -> Option<Self> { + match c_type.trim() { + "Word32" | "UWord32" | "Byte" | "Address" => Some(RustType::I32), + "Word64" | "UWord64" => Some(RustType::I64), + "Float32" => Some(RustType::F32), + "Float64" => Some(RustType::F64), + "void" => Some(RustType::Unit), + _ => None, + } + } + + fn to_rust_str(&self) -> &'static str { + match self { + RustType::I32 => "i32", + RustType::I64 => "i64", + RustType::F32 => "f32", + RustType::F64 => "f64", + RustType::Unit => "()", + } + } +} + +/// Information about an immediate operand parameter. +/// +/// Detected from C prototype parameter names like `Is16` (signed 16-bit), +/// `Iu5` (unsigned 5-bit), `IU5` (unsigned 5-bit secondary), `Iu6_2` +/// (unsigned 6-bit with 2-bit alignment).
+#[derive(Debug, Clone)] +struct ImmediateInfo { + /// Whether this is a signed immediate + signed: bool, + /// Number of bits in the immediate field + bits: u32, +} + +/// Arch guard for an intrinsic +#[derive(Debug, Clone, PartialEq)] +enum ArchGuard { + /// No guard (base v5/v55 intrinsics) + None, + /// `#if __HEXAGON_ARCH__ >= N` + Arch(u32), + /// `#if __HEXAGON_ARCH__ >= N && defined __HEXAGON_AUDIO__` + ArchAudio(u32), +} + +impl ArchGuard { + /// Returns a doc comment describing the required architecture version, + /// or None if no specific version is needed. + fn requires_doc(&self) -> Option<String> { + match self { + ArchGuard::None => Option::None, + ArchGuard::Arch(ver) => Some(format!("Requires: V{}", ver)), + ArchGuard::ArchAudio(ver) => Some(format!("Requires: V{}, Audio", ver)), + } + } + + /// Returns a `#[cfg_attr(target_arch = "hexagon", target_feature(enable = "..."))]` + /// attribute string, or None for base intrinsics that have no user-facing feature gate. + fn target_feature_attr(&self) -> Option<String> { + match self { + ArchGuard::None => None, + ArchGuard::Arch(ver) => Some(format!( + "#[cfg_attr(target_arch = \"hexagon\", target_feature(enable = \"v{}\"))]", + ver + )), + ArchGuard::ArchAudio(ver) => Some(format!( + "#[cfg_attr(target_arch = \"hexagon\", target_feature(enable = \"v{},audio\"))]", + ver + )), + } + } +} + +/// Parsed scalar intrinsic information +#[derive(Debug, Clone)] +struct ScalarIntrinsic { + /// Q6 name (e.g., "Q6_R_add_RR") + q6_name: String, + /// Builtin suffix (e.g., "A2_add") - from __builtin_HEXAGON_A2_add + builtin_name: String, + /// Assembly syntax + asm_syntax: String, + /// Instruction type + instr_type: String, + /// Execution slots + exec_slots: String, + /// Return type + return_type: RustType, + /// Parameters (name, type, optional immediate info) + params: Vec<(String, RustType, Option<ImmediateInfo>)>, + /// Architecture guard + arch_guard: ArchGuard, +} + +impl ScalarIntrinsic { + /// Generate the LLVM link name: A2_add ->
llvm.hexagon.A2.add + fn llvm_link_name(&self) -> String { + format!("llvm.hexagon.{}", self.builtin_name.replace('_', ".")) + } + + /// Generate the Rust function name: Q6_R_add_RR -> Q6_R_add_RR + /// + /// We preserve the original case because the Q6 naming convention uses + /// case to distinguish register types: + /// - `P` (uppercase) = 64-bit register pair (Word64) + /// - `p` (lowercase) = predicate register (Byte) + fn rust_fn_name(&self) -> String { + self.q6_name.clone() + } + + /// Generate the extern function name: A2_add -> hexagon_A2_add + fn extern_fn_name(&self) -> String { + format!("hexagon_{}", self.builtin_name) + } +} + +/// Read the local header file +fn read_header(crate_dir: &Path) -> Result<String, String> { + let header_path = crate_dir.join(HEADER_FILE); + println!("Reading scalar header from: {}", header_path.display()); + println!(" (LLVM version: {})", LLVM_VERSION); + + std::fs::read_to_string(&header_path).map_err(|e| { + format!( + "Failed to read header file {}: {}", + header_path.display(), + e + ) + }) +} + +/// Detect whether a C parameter name represents an immediate operand.
+/// +/// C prototype parameter names follow the pattern `I[usUS]\d+` for immediates: +/// - `Is16` → signed 16-bit +/// - `Iu5` → unsigned 5-bit +/// - `IS8` → signed 8-bit (secondary) +/// - `IU5` → unsigned 5-bit (secondary) +/// - `Iu6_2` → unsigned 6-bit (with alignment suffix) +fn detect_immediate(original_name: &str, imm_re: &Regex) -> Option<ImmediateInfo> { + imm_re.captures(original_name).map(|caps| { + let sign_char = &caps[1]; + let bits: u32 = caps[2].parse().unwrap(); + ImmediateInfo { + signed: sign_char == "s" || sign_char == "S", + bits, + } + }) +} + +/// Parse a C function prototype to extract return type and parameters +fn parse_prototype( + prototype: &str, + proto_re: &Regex, + param_re: &Regex, + imm_re: &Regex, +) -> Option<(RustType, Vec<(String, RustType, Option<ImmediateInfo>)>)> { + if let Some(caps) = proto_re.captures(prototype) { + let return_type_str = caps[1].trim(); + let params_str = &caps[2]; + + // Skip if return type is unknown + let return_type = RustType::from_c_type(return_type_str)?; + + let mut params = Vec::new(); + if !params_str.trim().is_empty() { + let mut name_counts: HashMap<String, u32> = HashMap::new(); + for param in params_str.split(',') { + let param = param.trim(); + if let Some(pcaps) = param_re.captures(param) { + let ptype_str = &pcaps[1]; + let original_name = &pcaps[2]; + let base_name = original_name.to_lowercase(); + + // Skip intrinsics with void* or void** params + if ptype_str.contains("void") { + return None; + } + + if let Some(ptype) = RustType::from_c_type(ptype_str) { + // Detect immediate operands from the original C name + let imm_info = detect_immediate(original_name, imm_re); + + // De-duplicate parameter names by appending a suffix + let count = name_counts.entry(base_name.clone()).or_insert(0); + *count += 1; + let pname = if *count > 1 { + format!("{}_{}", base_name, count) + } else { + base_name + }; + params.push((pname, ptype, imm_info)); + } else { + return None; // Unknown type + } + } + } + } + + Some((return_type, params))
+ } else { + None + } +} + +/// Parse the header file to extract all scalar intrinsics +fn parse_header(content: &str) -> Vec<ScalarIntrinsic> { + let mut intrinsics = Vec::new(); + + // Pre-compile all regexes once + let arch_guard_re = Regex::new(r"#if __HEXAGON_ARCH__ >= (\d+)(.*)").unwrap(); + let q6_define_re = Regex::new(r"#define\s+(Q6_\w+)\s+__builtin_HEXAGON_(\w+)").unwrap(); + let proto_re = Regex::new(r"(\w+)\s+Q6_\w+\(([^)]*)\)").unwrap(); + let param_re = Regex::new(r"(\w+\*{0,2})\s+(\w+)").unwrap(); + let imm_re = Regex::new(r"^I([uUsS])(\d+)").unwrap(); + + let lines: Vec<&str> = content.lines().collect(); + let mut current_guard = ArchGuard::None; + let mut i = 0; + + while i < lines.len() { + let line = lines[i].trim(); + + // Track #if guards + if let Some(caps) = arch_guard_re.captures(line) { + let arch_ver: u32 = caps[1].parse().unwrap_or(0); + let rest = &caps[2]; + if rest.contains("__HEXAGON_AUDIO__") { + current_guard = ArchGuard::ArchAudio(arch_ver); + } else { + current_guard = ArchGuard::Arch(arch_ver); + } + i += 1; + continue; + } + + // Track #endif to reset guard + if line.starts_with("#endif") + && !line.contains("__HEXAGON_PROTOS_H_") + && !line.contains("__HVX__") + { + current_guard = ArchGuard::None; + i += 1; + continue; + } + + // Look for comment blocks with Assembly Syntax + if line.contains("Assembly Syntax:") { + let mut asm_syntax = String::new(); + let mut prototype = String::new(); + let mut instr_type = String::new(); + let mut exec_slots = String::new(); + + // Parse the comment block + let mut j = i; + while j < lines.len() && !lines[j].trim().starts_with("#define") { + let cline = lines[j]; + if cline.contains("Assembly Syntax:") { + if let Some(pos) = cline.find("Assembly Syntax:") { + asm_syntax = cline[pos + 16..].trim().to_string(); + } + } else if cline.contains("C Intrinsic Prototype:") { + if let Some(pos) = cline.find("C Intrinsic Prototype:") { + prototype = cline[pos + 22..].trim().to_string(); + } + } else if
cline.contains("Instruction Type:") { + if let Some(pos) = cline.find("Instruction Type:") { + instr_type = cline[pos + 17..].trim().to_string(); + } + } else if cline.contains("Execution Slots:") { + if let Some(pos) = cline.find("Execution Slots:") { + exec_slots = cline[pos + 16..].trim().to_string(); + } + } + j += 1; + } + + // Find the #define line + while j < lines.len() && !lines[j].trim().starts_with("#define") { + j += 1; + } + + if j < lines.len() { + let define_line = lines[j]; + + if let Some(caps) = q6_define_re.captures(define_line) { + let q6_name = caps[1].to_string(); + let builtin_name = caps[2].to_string(); + + // Parse the C prototype + if let Some((return_type, params)) = + parse_prototype(&prototype, &proto_re, &param_re, &imm_re) + { + intrinsics.push(ScalarIntrinsic { + q6_name, + builtin_name, + asm_syntax, + instr_type, + exec_slots, + return_type, + params, + arch_guard: current_guard.clone(), + }); + } + } + } + i = j + 1; + continue; + } + + i += 1; + } + + intrinsics +} + +/// Generate the module documentation +fn generate_module_doc() -> String { + r#"//! Hexagon scalar intrinsics +//! +//! This module provides intrinsics for scalar (non-HVX) Hexagon DSP operations, +//! including arithmetic, multiply, shift, saturate, compare, and floating-point +//! operations. +//! +//! [Hexagon V68 Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-45) +//! +//! ## Naming Convention +//! +//! Function names preserve the original Q6 naming case because the convention +//! uses case to distinguish register types: +//! - `P` (uppercase) = 64-bit register pair (`Word64`) +//! - `p` (lowercase) = predicate register (`Byte`) +//! +//! For example, `Q6_P_and_PP` operates on 64-bit pairs while `Q6_p_and_pp` +//! operates on predicate registers. +//! +//! ## Architecture Versions +//! +//! Most scalar intrinsics are available on all Hexagon architectures. +//! Some intrinsics require specific architecture versions (v60, v62, v65, +//!
v66, v67, v68, or v67+audio) and carry +//! `#[target_feature(enable = "v68")]` (or the appropriate version). +//! Enable these with `-C target-feature=+v68` or by setting the target CPU +//! via `-C target-cpu=hexagonv68`. +//! +//! Each version includes all features from previous versions. + +#![allow(non_snake_case)] + +#[cfg(test)] +use stdarch_test::assert_instr; +"# + .to_string() +} + +/// Generate the extern block with LLVM intrinsic declarations +fn generate_extern_block(intrinsics: &[ScalarIntrinsic]) -> String { + let mut output = String::new(); + + output.push_str("// LLVM intrinsic declarations for Hexagon scalar operations\n"); + output.push_str("#[allow(improper_ctypes)]\n"); + output.push_str("unsafe extern \"unadjusted\" {\n"); + + for info in intrinsics { + let link_name = info.llvm_link_name(); + let fn_name = info.extern_fn_name(); + + let params_str = if info.params.is_empty() { + String::new() + } else { + info.params + .iter() + .map(|(_, t, _)| format!("_: {}", t.to_rust_str())) + .collect::<Vec<_>>() + .join(", ") + }; + + let return_str = if info.return_type == RustType::Unit { + String::new() + } else { + format!(" -> {}", info.return_type.to_rust_str()) + }; + + output.push_str(&format!( + " #[link_name = \"{}\"]\n fn {}({}){return_str};\n", + link_name, fn_name, params_str + )); + } + + output.push_str("}\n"); + output +} + +/// Generate wrapper functions for all intrinsics +fn generate_functions(intrinsics: &[ScalarIntrinsic]) -> String { + let mut output = String::new(); + + for info in intrinsics { + let rust_name = info.rust_fn_name(); + let extern_name = info.extern_fn_name(); + + // Collect immediate parameter info: (original_index, const_name, ImmediateInfo) + let imm_params: Vec<(usize, String, &ImmediateInfo)> = info + .params + .iter() + .enumerate() + .filter_map(|(i, (name, _, imm))| imm.as_ref().map(|im| (i, name.to_uppercase(), im))) + .collect(); + + // Doc comment + output.push_str(&format!("/// `{}`\n", info.asm_syntax)); +
output.push_str("///\n"); + output.push_str(&format!("/// Instruction Type: {}\n", info.instr_type)); + output.push_str(&format!("/// Execution Slots: {}\n", info.exec_slots)); + if let Some(req) = info.arch_guard.requires_doc() { + output.push_str(&format!("/// {}\n", req)); + } + + // Attributes + output.push_str("#[inline(always)]\n"); + if let Some(tf_attr) = info.arch_guard.target_feature_attr() { + output.push_str(&format!("{}\n", tf_attr)); + } + + // Immediate parameters become const generics but are passed as positional + // arguments at the call site: Q6_R_add_RI(rs, 42) rather than Q6_R_add_RI::<42>(rs). + // This matches the assembly syntax where the immediate is an operand. + if !imm_params.is_empty() { + let indices: Vec<String> = imm_params.iter().map(|(i, _, _)| i.to_string()).collect(); + output.push_str(&format!( + "#[rustc_legacy_const_generics({})]\n", + indices.join(", ") + )); + } + + if let Some(instr) = extract_instr_name(&info.asm_syntax) { + if imm_params.is_empty() { + output.push_str(&format!("#[cfg_attr(test, assert_instr({}))]\n", instr)); + } else { + // Provide default values for const generics in assert_instr + let defaults: Vec<String> = imm_params + .iter() + .map(|(_, name, _)| format!("{} = 0", name)) + .collect(); + output.push_str(&format!( + "#[cfg_attr(test, assert_instr({}, {}))]\n", + instr, + defaults.join(", ") + )); + } + } + + output.push_str(&format!( + "#[unstable(feature = \"stdarch_hexagon\", issue = \"{}\")]\n", + TRACKING_ISSUE + )); + + // Function signature: regular params exclude immediates, const generics added + let regular_params_str = info + .params + .iter() + .filter(|(_, _, imm)| imm.is_none()) + .map(|(name, ty, _)| format!("{}: {}", name, ty.to_rust_str())) + .collect::<Vec<_>>() + .join(", "); + + let return_str = if info.return_type == RustType::Unit { + String::new() + } else { + format!(" -> {}", info.return_type.to_rust_str()) + }; + + if imm_params.is_empty() { + output.push_str(&format!( + "pub unsafe fn {}({}){}
{{\n", + rust_name, regular_params_str, return_str + )); + } else { + let const_generics: Vec<String> = imm_params + .iter() + .map(|(_, name, imm)| { + let ty = if imm.signed { "i32" } else { "u32" }; + format!("const {}: {}", name, ty) + }) + .collect(); + output.push_str(&format!( + "pub unsafe fn {}<{}>({}){} {{\n", + rust_name, + const_generics.join(", "), + regular_params_str, + return_str + )); + } + + // Function body: static assertions then call + for (_, const_name, imm_info) in &imm_params { + if imm_info.signed { + output.push_str(&format!( + " static_assert_simm_bits!({}, {});\n", + const_name, imm_info.bits + )); + } else { + output.push_str(&format!( + " static_assert_uimm_bits!({}, {});\n", + const_name, imm_info.bits + )); + } + } + + // Call args: use original order, using const generic names for immediates. + // Unsigned const generics (u32) need a cast to i32 for the extern call. + let args_str = info + .params + .iter() + .map(|(name, _, imm)| match imm { + Some(info) if !info.signed => format!("{} as i32", name.to_uppercase()), + Some(_) => name.to_uppercase(), + None => name.clone(), + }) + .collect::<Vec<_>>() + .join(", "); + + output.push_str(&format!(" {}({})\n", extern_name, args_str)); + output.push_str("}\n\n"); + } + + output +} + +/// Generate the complete scalar.rs file +fn generate_scalar_file(intrinsics: &[ScalarIntrinsic], output_path: &Path) -> Result<(), String> { + let mut output = + File::create(output_path).map_err(|e| format!("Failed to create output: {}", e))?; + + writeln!(output, "{}", generate_module_doc()).map_err(|e| e.to_string())?; + writeln!(output, "").map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_extern_block(intrinsics)).map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_functions(intrinsics)).map_err(|e| e.to_string())?; + + // Flush before running rustfmt + drop(output); + + // Run rustfmt on the generated file + let status = std::process::Command::new("rustfmt") + .arg(output_path) + .status() +
.map_err(|e| format!("Failed to run rustfmt: {}", e))?; + + if !status.success() { + return Err("rustfmt failed".to_string()); + } + + Ok(()) +} + +fn main() -> Result<(), String> { + println!("=== Hexagon Scalar Code Generator ===\n"); + + let crate_dir = std::env::var("CARGO_MANIFEST_DIR") + .map(std::path::PathBuf::from) + .unwrap_or_else(|_| std::env::current_dir().unwrap()); + + let header_content = read_header(&crate_dir)?; + println!("Read {} bytes", header_content.len()); + + let intrinsics = parse_header(&header_content); + println!("Parsed {} scalar intrinsics", intrinsics.len()); + + let hexagon_dir = crate_dir.join("../core_arch/src/hexagon"); + let scalar_path = hexagon_dir.join("scalar.rs"); + + generate_scalar_file(&intrinsics, &scalar_path)?; + println!("Generated scalar.rs at {}", scalar_path.display()); + + Ok(()) +} diff --git a/library/stdarch/examples/gaussian.rs b/library/stdarch/examples/gaussian.rs index a310c24def9a7..f41f7539860b0 100644 --- a/library/stdarch/examples/gaussian.rs +++ b/library/stdarch/examples/gaussian.rs @@ -296,13 +296,14 @@ fn main() { // Run reference gaussian3x3u8_reference(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_ref); - // Verify HVX matches reference (allowing small rounding differences) + // Verify HVX matches reference (allowing small rounding differences). 
+ use core_arch::arch::hexagon::scalar::{Q6_R_abs_R, Q6_R_max_RR}; let mut max_diff = 0i32; for y in 1..HEIGHT - 1 { for x in 1..WIDTH - 1 { let idx = y * WIDTH + x; - let diff = (dst_hvx.0[idx] as i32 - dst_ref[idx] as i32).abs(); - max_diff = max_diff.max(diff); + let diff = unsafe { Q6_R_abs_R(dst_hvx.0[idx] as i32 - dst_ref[idx] as i32) }; + max_diff = unsafe { Q6_R_max_RR(max_diff, diff) }; // Allow up to 1 LSB difference due to rounding assert!( diff <= 1, From be08ac83312c6ca0ffa09b4c20047bc7db48561d Mon Sep 17 00:00:00 2001 From: Adam Gemmell Date: Thu, 16 Apr 2026 15:22:10 +0100 Subject: [PATCH 05/30] Update arm_intrinsics.json Most of the changes in the JSON were merged earlier in the SVE intrinsics PR --- .../crates/intrinsic-test/missing_aarch64.txt | 67 +- .../intrinsic-test/missing_aarch64_be.txt | 65 +- .../crates/intrinsic-test/missing_arm.txt | 49 +- .../crates/intrinsic-test/src/arm/compile.rs | 2 +- .../crates/intrinsic-test/src/arm/config.rs | 4 +- .../intrinsics_data/arm_intrinsics.json | 9410 ++--------------- 6 files changed, 948 insertions(+), 8649 deletions(-) diff --git a/library/stdarch/crates/intrinsic-test/missing_aarch64.txt b/library/stdarch/crates/intrinsic-test/missing_aarch64.txt index 3c1ac59e910e9..f0c9eeb6ce2c9 100644 --- a/library/stdarch/crates/intrinsic-test/missing_aarch64.txt +++ b/library/stdarch/crates/intrinsic-test/missing_aarch64.txt @@ -1,8 +1,14 @@ # Not supported by qemu (will throw illegal instruction) -vamax_f16 -vamaxq_f16 vamin_f16 vaminq_f16 +vamin_f32 +vaminq_f32 +vaminq_f64 +vamax_f16 +vamaxq_f16 +vamax_f32 +vamaxq_f32 +vamaxq_f64 vscale_f16 vscale_f32 vscaleq_f16 @@ -37,39 +43,6 @@ vluti2q_laneq_s8 vluti2q_laneq_u16 vluti2q_laneq_u8 vluti2q_lane_u8 - -# Not implemented in stdarch yet -vbfdot_f32 -vbfdot_lane_f32 -vbfdot_laneq_f32 -vbfdotq_f32 -vbfdotq_lane_f32 -vbfdotq_laneq_f32 -vbfmlalbq_f32 -vbfmlalbq_lane_f32 -vbfmlalbq_laneq_f32 -vbfmlaltq_f32 -vbfmlaltq_lane_f32 -vbfmlaltq_laneq_f32 -vbfmmlaq_f32 - - 
-# Implemented in stdarch, but missing in Clang. -vrnd32xq_f64 -vrnd32zq_f64 -vrnd64xq_f64 -vrnd64zq_f64 -vamin_f32 -vaminq_f32 -vaminq_f64 -vamax_f32 -vamaxq_f32 -vamaxq_f64 -# LLVM select error, and missing in Clang. -vrnd32x_f64 -vrnd32z_f64 -vrnd64x_f64 -vrnd64z_f64 vluti4q_lane_f16_x2 vluti4q_lane_p16_x2 vluti4q_lane_p8 @@ -85,5 +58,27 @@ vluti4q_laneq_s8 vluti4q_laneq_u16_x2 vluti4q_laneq_u8 -# Broken in Clang +# Not implemented in stdarch yet +vcvtad_s32_f64 +vcvtad_u32_f64 +vcvtd_s32_f64 +vcvtd_u32_f64 +vcvtmd_s32_f64 +vcvtmd_u32_f64 +vcvtnd_s32_f64 +vcvtnd_u32_f64 +vcvtpd_s32_f64 +vcvtpd_u32_f64 +vcvts_s64_f32 +vcvts_u64_f32 +vcvtas_s64_f32 +vcvtas_u64_f32 +vcvtms_s64_f32 +vcvtms_u64_f32 +vcvtns_s64_f32 +vcvtns_u64_f32 +vcvtps_s64_f32 +vcvtps_u64_f32 + +# Broken in Clang (fixed in https://github.com/llvm/llvm-project/pull/156029) vcvth_s16_f16 diff --git a/library/stdarch/crates/intrinsic-test/missing_aarch64_be.txt b/library/stdarch/crates/intrinsic-test/missing_aarch64_be.txt index f3c4ffa3d0640..9163aaa1c8db0 100644 --- a/library/stdarch/crates/intrinsic-test/missing_aarch64_be.txt +++ b/library/stdarch/crates/intrinsic-test/missing_aarch64_be.txt @@ -39,10 +39,16 @@ vusdotq_laneq_s32 # Below are in common to missing_aarch64.txt # Not supported by qemu (will throw illegal instruction) -vamax_f16 -vamaxq_f16 vamin_f16 vaminq_f16 +vamin_f32 +vaminq_f32 +vaminq_f64 +vamax_f16 +vamaxq_f16 +vamax_f32 +vamaxq_f32 +vamaxq_f64 vscale_f16 vscale_f32 vscaleq_f16 @@ -77,39 +83,6 @@ vluti2q_laneq_s8 vluti2q_laneq_u16 vluti2q_laneq_u8 vluti2q_lane_u8 - -# Not implemented in stdarch yet -vbfdot_f32 -vbfdot_lane_f32 -vbfdot_laneq_f32 -vbfdotq_f32 -vbfdotq_lane_f32 -vbfdotq_laneq_f32 -vbfmlalbq_f32 -vbfmlalbq_lane_f32 -vbfmlalbq_laneq_f32 -vbfmlaltq_f32 -vbfmlaltq_lane_f32 -vbfmlaltq_laneq_f32 -vbfmmlaq_f32 - - -# Implemented in stdarch, but missing in Clang. 
-vrnd32xq_f64 -vrnd32zq_f64 -vrnd64xq_f64 -vrnd64zq_f64 -vamin_f32 -vaminq_f32 -vaminq_f64 -vamax_f32 -vamaxq_f32 -vamaxq_f64 -# LLVM select error, and missing in Clang. -vrnd32x_f64 -vrnd32z_f64 -vrnd64x_f64 -vrnd64z_f64 vluti4q_lane_f16_x2 vluti4q_lane_p16_x2 vluti4q_lane_p8 @@ -125,5 +98,27 @@ vluti4q_laneq_s8 vluti4q_laneq_u16_x2 vluti4q_laneq_u8 +# Not implemented in stdarch yet +vcvtad_s32_f64 +vcvtad_u32_f64 +vcvtd_s32_f64 +vcvtd_u32_f64 +vcvtmd_s32_f64 +vcvtmd_u32_f64 +vcvtnd_s32_f64 +vcvtnd_u32_f64 +vcvtpd_s32_f64 +vcvtpd_u32_f64 +vcvts_s64_f32 +vcvts_u64_f32 +vcvtas_s64_f32 +vcvtas_u64_f32 +vcvtms_s64_f32 +vcvtms_u64_f32 +vcvtns_s64_f32 +vcvtns_u64_f32 +vcvtps_s64_f32 +vcvtps_u64_f32 + # Broken in Clang vcvth_s16_f16 diff --git a/library/stdarch/crates/intrinsic-test/missing_arm.txt b/library/stdarch/crates/intrinsic-test/missing_arm.txt index 04c09a27d90d4..165b45d50cf1f 100644 --- a/library/stdarch/crates/intrinsic-test/missing_arm.txt +++ b/library/stdarch/crates/intrinsic-test/missing_arm.txt @@ -1,18 +1,3 @@ -# Not implemented in stdarch yet -vbfdot_f32 -vbfdot_lane_f32 -vbfdot_laneq_f32 -vbfdotq_f32 -vbfdotq_lane_f32 -vbfdotq_laneq_f32 -vbfmlalbq_f32 -vbfmlalbq_lane_f32 -vbfmlalbq_laneq_f32 -vbfmlaltq_f32 -vbfmlaltq_lane_f32 -vbfmlaltq_laneq_f32 -vbfmmlaq_f32 - # Implemented in Clang and stdarch for A64 only even though CSV claims A32 support vaddq_p64 vbsl_p64 @@ -134,18 +119,6 @@ vcvth_f16_u32 vcvth_u32_f16 vcvth_n_f16_u32 vcvth_n_u32_f16 -vcvtah_s32_f16 -vcvtah_u32_f16 -vcvtmh_s32_f16 -vcvtmh_u32_f16 -vcvtpq_s16_f16 -vcvtpq_u16_f16 -vcvtp_s16_f16 -vcvtp_u16_f16 -vcvtph_s32_f16 -vcvtph_u32_f16 -vcvtnh_u32_f16 -vcvtnh_s32_f16 vfmlsl_low_f16 vfmlslq_low_f16 vfmlsl_high_f16 @@ -318,3 +291,25 @@ vcvta_s16_f16 vcvta_u16_f16 vceqz_f16 vceqzq_f16 +vcvtah_s32_f16 +vcvtah_u32_f16 +vcvtmh_s32_f16 +vcvtmh_u32_f16 +vcvtpq_s16_f16 +vcvtpq_u16_f16 +vcvtp_s16_f16 +vcvtp_u16_f16 +vcvtph_s32_f16 +vcvtph_u32_f16 +vcvtnh_u32_f16 +vcvtnh_s32_f16 + +# Missing in Clang 
+vusdot_laneq_s32 +vsudot_laneq_s32 +vusdotq_laneq_s32 +vsudotq_laneq_s32 +vdot_laneq_s32 +vdot_laneq_u32 +vdotq_laneq_s32 +vdotq_laneq_u32 diff --git a/library/stdarch/crates/intrinsic-test/src/arm/compile.rs b/library/stdarch/crates/intrinsic-test/src/arm/compile.rs index 7da35f9a1114b..a672da2cc0d6b 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/compile.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/compile.rs @@ -15,7 +15,7 @@ pub fn build_cpp_compilation(config: &ProcessedCli) -> Option { .add_extra_flags(["-ffp-contract=off", "-Wno-narrowing"]); if !config.target.contains("v7") { - command = command.add_arch_flags(["faminmax", "lut", "sha3"]); + command = command.add_arch_flags(["faminmax", "lut", "sha3", "fp8"]); } if !cpp_compiler.contains("clang") { diff --git a/library/stdarch/crates/intrinsic-test/src/arm/config.rs b/library/stdarch/crates/intrinsic-test/src/arm/config.rs index 85cb21c2d6c4e..60bb0ca56cefa 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/config.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/config.rs @@ -58,7 +58,9 @@ pub const PLATFORM_RUST_CFGS: &str = r#" #![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_i8mm))] #![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_sm4))] #![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_ftts))] -#![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_aarch64_jscvt))] +#![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_feat_lut))] +#![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(stdarch_neon_fp8))] +#![cfg_attr(any(target_arch = "aarch64", target_arch = "arm64ec"), feature(faminmax))] #![feature(fmt_helpers_for_derive)] #![feature(stdarch_neon_f16)] diff --git a/library/stdarch/intrinsics_data/arm_intrinsics.json b/library/stdarch/intrinsics_data/arm_intrinsics.json 
index 3a3b962a48730..fab6da7f2c16c 100644 --- a/library/stdarch/intrinsics_data/arm_intrinsics.json +++ b/library/stdarch/intrinsics_data/arm_intrinsics.json @@ -35103,114 +35103,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svcntp_c16", - "arguments": [ - "svcount_t pnn", - "uint64_t vl" - ], - "return_type": { - "value": "uint64_t" - }, - "Arguments_Preparation": { - "pnn": { - "register": "PNreg1.H" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "CNTP" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svcntp_c32", - "arguments": [ - "svcount_t pnn", - "uint64_t vl" - ], - "return_type": { - "value": "uint64_t" - }, - "Arguments_Preparation": { - "pnn": { - "register": "PNreg1.S" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "CNTP" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svcntp_c64", - "arguments": [ - "svcount_t pnn", - "uint64_t vl" - ], - "return_type": { - "value": "uint64_t" - }, - "Arguments_Preparation": { - "pnn": { - "register": "PNreg1.D" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "CNTP" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svcntp_c8", - "arguments": [ - "svcount_t pnn", - "uint64_t vl" - ], - "return_type": { - "value": "uint64_t" - }, - "Arguments_Preparation": { - "pnn": { - "register": "PNreg1.B" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "CNTP" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svcntw", @@ -35409,20 +35301,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svcreate2[_b]", - "arguments": [ - "svbool_t x", - "svbool_t y" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Architectures": [ - "A64" - ] - }, { "SIMD_ISA": "SVE", "name": "svcreate2[_f16]", @@ -35742,22 +35620,6 @@ "A64" ] }, - { - "SIMD_ISA": "SVE2", - "name": "svcreate4[_b]", - "arguments": [ - "svbool_t x", - "svbool_t y", 
- "svbool_t z", - "svbool_t w" - ], - "return_type": { - "value": "svboolx4_t" - }, - "Architectures": [ - "A64" - ] - }, { "SIMD_ISA": "SVE", "name": "svcreate4[_f16]", @@ -51305,20 +51167,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svget2[_b]", - "arguments": [ - "svboolx2_t tuple", - "uint64_t imm_index" - ], - "return_type": { - "value": "svbool_t" - }, - "Architectures": [ - "A64" - ] - }, { "SIMD_ISA": "SVE", "name": "svget2[_f16]", @@ -51627,20 +51475,6 @@ "A64" ] }, - { - "SIMD_ISA": "SVE2", - "name": "svget4[_b]", - "arguments": [ - "svboolx4_t tuple", - "uint64_t imm_index" - ], - "return_type": { - "value": "svbool_t" - }, - "Architectures": [ - "A64" - ] - }, { "SIMD_ISA": "SVE", "name": "svget4[_f16]", @@ -58513,60 +58347,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_f16]_x2", - "arguments": [ - "svcount_t png", - "float16_t const * rn" - ], - "return_type": { - "value": "svfloat16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_f16]_x4", - "arguments": [ - "svcount_t png", - "float16_t const * rn" - ], - "return_type": { - "value": "svfloat16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1[_f32]", @@ -58597,60 +58377,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_f32]_x2", - "arguments": [ - "svcount_t png", - "float32_t const * rn" - ], - "return_type": { - "value": "svfloat32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_f32]_x4", - "arguments": [ - 
"svcount_t png", - "float32_t const * rn" - ], - "return_type": { - "value": "svfloat32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1[_f64]", @@ -58682,21 +58408,21 @@ ] }, { - "SIMD_ISA": "SVE2", - "name": "svld1[_f64]_x2", + "SIMD_ISA": "SVE", + "name": "svld1[_s16]", "arguments": [ - "svcount_t png", - "float64_t const * rn" + "svbool_t pg", + "const int16_t *base" ], "return_type": { - "value": "svfloat64x2_t" + "value": "svint16_t" }, "Arguments_Preparation": { - "png": { - "register": "PNreg1" + "base": { + "register": "Xbase" }, - "rn": { - "register": "Xreg1" + "pg": { + "register": "Pg.H" } }, "Architectures": [ @@ -58704,26 +58430,29 @@ ], "instructions": [ [ - "LD1D" + "LD1H" + ], + [ + "LD1H" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svld1[_f64]_x4", + "SIMD_ISA": "SVE", + "name": "svld1[_s32]", "arguments": [ - "svcount_t png", - "float64_t const * rn" + "svbool_t pg", + "const int32_t *base" ], "return_type": { - "value": "svfloat64x4_t" + "value": "svint32_t" }, "Arguments_Preparation": { - "png": { - "register": "PNreg1" + "base": { + "register": "Xbase" }, - "rn": { - "register": "Xreg1" + "pg": { + "register": "Pg.S" } }, "Architectures": [ @@ -58731,26 +58460,29 @@ ], "instructions": [ [ - "LD1D" + "LD1W" + ], + [ + "LD1W" ] ] }, { "SIMD_ISA": "SVE", - "name": "svld1[_s16]", + "name": "svld1[_s64]", "arguments": [ "svbool_t pg", - "const int16_t *base" + "const int64_t *base" ], "return_type": { - "value": "svint16_t" + "value": "svint64_t" }, "Arguments_Preparation": { "base": { "register": "Xbase" }, "pg": { - "register": "Pg.H" + "register": "Pg.D" } }, "Architectures": [ @@ -58758,29 +58490,29 @@ ], "instructions": [ [ - "LD1H" + "LD1D" ], [ - "LD1H" + "LD1D" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svld1[_s16]_x2", + "SIMD_ISA": "SVE", + "name": 
"svld1[_s8]", "arguments": [ - "svcount_t png", - "int16_t const * rn" + "svbool_t pg", + "const int8_t *base" ], "return_type": { - "value": "svint16x2_t" + "value": "svint8_t" }, "Arguments_Preparation": { - "png": { - "register": "PNreg1" + "base": { + "register": "Xbase" }, - "rn": { - "register": "Xreg1" + "pg": { + "register": "Pg.B" } }, "Architectures": [ @@ -58788,32 +58520,38 @@ ], "instructions": [ [ - "LD1H" + "LD1B" + ], + [ + "LD1B" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svld1[_s16]_x4", + "SIMD_ISA": "SVE", + "name": "svld1[_u16]", "arguments": [ - "svcount_t png", - "int16_t const * rn" + "svbool_t pg", + "const uint16_t *base" ], "return_type": { - "value": "svint16x4_t" + "value": "svuint16_t" }, "Arguments_Preparation": { - "png": { - "register": "PNreg1" + "base": { + "register": "Xbase" }, - "rn": { - "register": "Xreg1" + "pg": { + "register": "Pg.H" } }, "Architectures": [ "A64" ], "instructions": [ + [ + "LD1H" + ], [ "LD1H" ] @@ -58821,13 +58559,13 @@ }, { "SIMD_ISA": "SVE", - "name": "svld1[_s32]", + "name": "svld1[_u32]", "arguments": [ "svbool_t pg", - "const int32_t *base" + "const uint32_t *base" ], "return_type": { - "value": "svint32_t" + "value": "svuint32_t" }, "Arguments_Preparation": { "base": { @@ -58849,396 +58587,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_s32]_x2", - "arguments": [ - "svcount_t png", - "int32_t const * rn" - ], - "return_type": { - "value": "svint32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_s32]_x4", - "arguments": [ - "svcount_t png", - "int32_t const * rn" - ], - "return_type": { - "value": "svint32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1W" - ] - ] - }, 
- { - "SIMD_ISA": "SVE", - "name": "svld1[_s64]", - "arguments": [ - "svbool_t pg", - "const int64_t *base" - ], - "return_type": { - "value": "svint64_t" - }, - "Arguments_Preparation": { - "base": { - "register": "Xbase" - }, - "pg": { - "register": "Pg.D" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1D" - ], - [ - "LD1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_s64]_x2", - "arguments": [ - "svcount_t png", - "int64_t const * rn" - ], - "return_type": { - "value": "svint64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_s64]_x4", - "arguments": [ - "svcount_t png", - "int64_t const * rn" - ], - "return_type": { - "value": "svint64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1D" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svld1[_s8]", - "arguments": [ - "svbool_t pg", - "const int8_t *base" - ], - "return_type": { - "value": "svint8_t" - }, - "Arguments_Preparation": { - "base": { - "register": "Xbase" - }, - "pg": { - "register": "Pg.B" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1B" - ], - [ - "LD1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_s8]_x2", - "arguments": [ - "svcount_t png", - "int8_t const * rn" - ], - "return_type": { - "value": "svint8x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_s8]_x4", - "arguments": [ - "svcount_t png", - "int8_t const * rn" - ], - "return_type": { - "value": "svint8x4_t" - }, - "Arguments_Preparation": { - "png": { - 
"register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1B" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svld1[_u16]", - "arguments": [ - "svbool_t pg", - "const uint16_t *base" - ], - "return_type": { - "value": "svuint16_t" - }, - "Arguments_Preparation": { - "base": { - "register": "Xbase" - }, - "pg": { - "register": "Pg.H" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1H" - ], - [ - "LD1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_u16]_x2", - "arguments": [ - "svcount_t png", - "uint16_t const * rn" - ], - "return_type": { - "value": "svuint16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_u16]_x4", - "arguments": [ - "svcount_t png", - "uint16_t const * rn" - ], - "return_type": { - "value": "svuint16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1H" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svld1[_u32]", - "arguments": [ - "svbool_t pg", - "const uint32_t *base" - ], - "return_type": { - "value": "svuint32_t" - }, - "Arguments_Preparation": { - "base": { - "register": "Xbase" - }, - "pg": { - "register": "Pg.S" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1W" - ], - [ - "LD1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_u32]_x2", - "arguments": [ - "svcount_t png", - "uint32_t const * rn" - ], - "return_type": { - "value": "svuint32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": 
"svld1[_u32]_x4", - "arguments": [ - "svcount_t png", - "uint32_t const * rn" - ], - "return_type": { - "value": "svuint32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1[_u64]", @@ -59269,60 +58617,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_u64]_x2", - "arguments": [ - "svcount_t png", - "uint64_t const * rn" - ], - "return_type": { - "value": "svuint64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_u64]_x4", - "arguments": [ - "svcount_t png", - "uint64_t const * rn" - ], - "return_type": { - "value": "svuint64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1[_u8]", @@ -59353,60 +58647,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_u8]_x2", - "arguments": [ - "svcount_t png", - "uint8_t const * rn" - ], - "return_type": { - "value": "svuint8x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1[_u8]_x4", - "arguments": [ - "svcount_t png", - "uint8_t const * rn" - ], - "return_type": { - "value": "svuint8x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LD1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_gather[_u32base]_f32", @@ -60755,72 +59995,6 @@ ] ] }, - { - 
"SIMD_ISA": "SVE2", - "name": "svld1_vnum[_f16]_x2", - "arguments": [ - "svcount_t png", - "float16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_f16]_x4", - "arguments": [ - "svcount_t png", - "float16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_f32]", @@ -60855,72 +60029,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_f32]_x2", - "arguments": [ - "svcount_t png", - "float32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_f32]_x4", - "arguments": [ - "svcount_t png", - "float32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_f64]", @@ -60955,72 +60063,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_f64]_x2", - "arguments": [ 
- "svcount_t png", - "float64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_f64]_x4", - "arguments": [ - "svcount_t png", - "float64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_s16]", @@ -61055,72 +60097,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_s16]_x2", - "arguments": [ - "svcount_t png", - "int16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_s16]_x4", - "arguments": [ - "svcount_t png", - "int16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_s32]", @@ -61155,72 +60131,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_s32]_x2", - "arguments": [ - "svcount_t png", - "int32_t const * rn", - "int64_t vnum" - ], - 
"return_type": { - "value": "svint32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_s32]_x4", - "arguments": [ - "svcount_t png", - "int32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_s64]", @@ -61255,72 +60165,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_s64]_x2", - "arguments": [ - "svcount_t png", - "int64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_s64]_x4", - "arguments": [ - "svcount_t png", - "int64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_s8]", @@ -61355,72 +60199,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_s8]_x2", - "arguments": [ - "svcount_t png", - "int8_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint8x2_t" - }, - "Arguments_Preparation": { - "png": { - 
"register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MUL", - "LD1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_s8]_x4", - "arguments": [ - "svcount_t png", - "int8_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint8x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MUL", - "LD1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_u16]", @@ -61455,72 +60233,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_u16]_x2", - "arguments": [ - "svcount_t png", - "uint16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_u16]_x4", - "arguments": [ - "svcount_t png", - "uint16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_u32]", @@ -61555,72 +60267,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_u32]_x2", - "arguments": [ - "svcount_t png", - "uint32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - 
"register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_u32]_x4", - "arguments": [ - "svcount_t png", - "uint32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_u64]", @@ -61655,72 +60301,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_u64]_x2", - "arguments": [ - "svcount_t png", - "uint64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_u64]_x4", - "arguments": [ - "svcount_t png", - "uint64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LD1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1_vnum[_u8]", @@ -61755,72 +60335,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_u8]_x2", - "arguments": [ - "svcount_t png", - "uint8_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint8x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": 
[ - [ - "RDVL", - "MUL", - "LD1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svld1_vnum[_u8]_x4", - "arguments": [ - "svcount_t png", - "uint8_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint8x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MUL", - "LD1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svld1ro[_f16]", @@ -79033,60 +77547,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_f16]_x2", - "arguments": [ - "svcount_t png", - "float16_t const * rn" - ], - "return_type": { - "value": "svfloat16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_f16]_x4", - "arguments": [ - "svcount_t png", - "float16_t const * rn" - ], - "return_type": { - "value": "svfloat16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_f32]", @@ -79117,60 +77577,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_f32]_x2", - "arguments": [ - "svcount_t png", - "float32_t const * rn" - ], - "return_type": { - "value": "svfloat32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_f32]_x4", - "arguments": [ - "svcount_t png", - "float32_t const * rn" - ], - "return_type": { - "value": "svfloat32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": 
"Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_f64]", @@ -79201,60 +77607,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_f64]_x2", - "arguments": [ - "svcount_t png", - "float64_t const * rn" - ], - "return_type": { - "value": "svfloat64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_f64]_x4", - "arguments": [ - "svcount_t png", - "float64_t const * rn" - ], - "return_type": { - "value": "svfloat64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_s16]", @@ -79285,60 +77637,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_s16]_x2", - "arguments": [ - "svcount_t png", - "int16_t const * rn" - ], - "return_type": { - "value": "svint16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_s16]_x4", - "arguments": [ - "svcount_t png", - "int16_t const * rn" - ], - "return_type": { - "value": "svint16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_s32]", @@ -79369,60 +77667,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_s32]_x2", - "arguments": [ - "svcount_t png", - "int32_t const * rn" - ], - "return_type": { - "value": "svint32x2_t" - }, - "Arguments_Preparation": { - "png": { - 
"register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_s32]_x4", - "arguments": [ - "svcount_t png", - "int32_t const * rn" - ], - "return_type": { - "value": "svint32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_s64]", @@ -79453,60 +77697,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_s64]_x2", - "arguments": [ - "svcount_t png", - "int64_t const * rn" - ], - "return_type": { - "value": "svint64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_s64]_x4", - "arguments": [ - "svcount_t png", - "int64_t const * rn" - ], - "return_type": { - "value": "svint64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_s8]", @@ -79537,60 +77727,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_s8]_x2", - "arguments": [ - "svcount_t png", - "int8_t const * rn" - ], - "return_type": { - "value": "svint8x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_s8]_x4", - "arguments": [ - "svcount_t png", - "int8_t const * rn" - ], - "return_type": { - "value": "svint8x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": 
"Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_u16]", @@ -79621,60 +77757,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_u16]_x2", - "arguments": [ - "svcount_t png", - "uint16_t const * rn" - ], - "return_type": { - "value": "svuint16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_u16]_x4", - "arguments": [ - "svcount_t png", - "uint16_t const * rn" - ], - "return_type": { - "value": "svuint16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_u32]", @@ -79705,60 +77787,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_u32]_x2", - "arguments": [ - "svcount_t png", - "uint32_t const * rn" - ], - "return_type": { - "value": "svuint32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_u32]_x4", - "arguments": [ - "svcount_t png", - "uint32_t const * rn" - ], - "return_type": { - "value": "svuint32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_u64]", @@ -79789,60 +77817,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_u64]_x2", - "arguments": [ - "svcount_t png", - "uint64_t const * rn" - ], - "return_type": { - "value": "svuint64x2_t" - }, - "Arguments_Preparation": { - "png": { - 
"register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_u64]_x4", - "arguments": [ - "svcount_t png", - "uint64_t const * rn" - ], - "return_type": { - "value": "svuint64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1[_u8]", @@ -79873,60 +77847,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_u8]_x2", - "arguments": [ - "svcount_t png", - "uint8_t const * rn" - ], - "return_type": { - "value": "svuint8x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1[_u8]_x4", - "arguments": [ - "svcount_t png", - "uint8_t const * rn" - ], - "return_type": { - "value": "svuint8x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "LDNT1B" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svldnt1_gather[_u32base]_f32", @@ -80960,72 +78880,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_f16]_x2", - "arguments": [ - "svcount_t png", - "float16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_f16]_x4", - "arguments": [ - "svcount_t png", - "float16_t const * rn", - "int64_t vnum" - ], - 
"return_type": { - "value": "svfloat16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_f32]", @@ -81060,72 +78914,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_f32]_x2", - "arguments": [ - "svcount_t png", - "float32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_f32]_x4", - "arguments": [ - "svcount_t png", - "float32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_f64]", @@ -81160,72 +78948,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_f64]_x2", - "arguments": [ - "svcount_t png", - "float64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_f64]_x4", - "arguments": [ - "svcount_t png", - "float64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svfloat64x4_t" - }, 
- "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_s16]", @@ -81260,72 +78982,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_s16]_x2", - "arguments": [ - "svcount_t png", - "int16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_s16]_x4", - "arguments": [ - "svcount_t png", - "int16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_s32]", @@ -81360,72 +79016,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_s32]_x2", - "arguments": [ - "svcount_t png", - "int32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_s32]_x4", - "arguments": [ - "svcount_t png", - "int32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - 
}, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_s64]", @@ -81460,72 +79050,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_s64]_x2", - "arguments": [ - "svcount_t png", - "int64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_s64]_x4", - "arguments": [ - "svcount_t png", - "int64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_s8]", @@ -81560,72 +79084,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_s8]_x2", - "arguments": [ - "svcount_t png", - "int8_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint8x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MUL", - "LDNT1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_s8]_x4", - "arguments": [ - "svcount_t png", - "int8_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svint8x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" 
- } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MUL", - "LDNT1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_u16]", @@ -81660,72 +79118,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_u16]_x2", - "arguments": [ - "svcount_t png", - "uint16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint16x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_u16]_x4", - "arguments": [ - "svcount_t png", - "uint16_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint16x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_u32]", @@ -81760,72 +79152,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_u32]_x2", - "arguments": [ - "svcount_t png", - "uint32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint32x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_u32]_x4", - "arguments": [ - "svcount_t png", - "uint32_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint32x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - 
"instructions": [ - [ - "RDVL", - "MADD", - "LDNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_u64]", @@ -81860,72 +79186,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_u64]_x2", - "arguments": [ - "svcount_t png", - "uint64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint64x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_u64]_x4", - "arguments": [ - "svcount_t png", - "uint64_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint64x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "LDNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svldnt1_vnum[_u8]", @@ -81960,72 +79220,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_u8]_x2", - "arguments": [ - "svcount_t png", - "uint8_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint8x2_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MUL", - "LDNT1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svldnt1_vnum[_u8]_x4", - "arguments": [ - "svcount_t png", - "uint8_t const * rn", - "int64_t vnum" - ], - "return_type": { - "value": "svuint8x4_t" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "vnum": { - "register": "Xreg3" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MUL", - "LDNT1B" - ] - ] - }, 
{ "SIMD_ISA": "SVE2", "name": "svldnt1sb_gather[_u32base]_offset_s32", @@ -127681,222 +124875,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svpext_lane_c16", - "arguments": [ - "svcount_t pnn", - "uint64_t imm" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "imm": { - "immediate": "imm1" - }, - "pnn": { - "register": "PNreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PEXT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpext_lane_c16_x2", - "arguments": [ - "svcount_t pnn", - "uint64_t imm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "imm": { - "immediate": "imm1" - }, - "pnn": { - "register": "PNreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PEXT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpext_lane_c32", - "arguments": [ - "svcount_t pnn", - "uint64_t imm" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "imm": { - "immediate": "imm1" - }, - "pnn": { - "register": "PNreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PEXT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpext_lane_c32_x2", - "arguments": [ - "svcount_t pnn", - "uint64_t imm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "imm": { - "immediate": "imm1" - }, - "pnn": { - "register": "PNreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PEXT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpext_lane_c64", - "arguments": [ - "svcount_t pnn", - "uint64_t imm" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "imm": { - "immediate": "imm1" - }, - "pnn": { - "register": "PNreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PEXT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpext_lane_c64_x2", - "arguments": [ - "svcount_t pnn", - "uint64_t imm" - ], - "return_type": { - 
"value": "svboolx2_t" - }, - "Arguments_Preparation": { - "imm": { - "immediate": "imm1" - }, - "pnn": { - "register": "PNreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PEXT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpext_lane_c8", - "arguments": [ - "svcount_t pnn", - "uint64_t imm" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "imm": { - "immediate": "imm1" - }, - "pnn": { - "register": "PNreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PEXT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpext_lane_c8_x2", - "arguments": [ - "svcount_t pnn", - "uint64_t imm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "imm": { - "immediate": "imm1" - }, - "pnn": { - "register": "PNreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PEXT" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svpfalse[_b]", @@ -127914,23 +124892,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svpfalse_c", - "arguments": [], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": {}, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PFALSE" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svpfirst[_b]", @@ -130064,130 +127025,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svpsel_lane_c16", - "arguments": [ - "svcount_t pn", - "svbool_t pm", - "uint32_t idx" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "idx": { - "index": "[Wreg1, imm1]" - }, - "pm": { - "register": "Preg3" - }, - "pn": { - "register": "Preg2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PSEL" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpsel_lane_c32", - "arguments": [ - "svcount_t pn", - "svbool_t pm", - "uint32_t idx" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "idx": { - "index": "[Wreg1, imm1]" - }, - "pm": { - "register": "Preg3" - }, - 
"pn": { - "register": "Preg2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PSEL" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpsel_lane_c64", - "arguments": [ - "svcount_t pn", - "svbool_t pm", - "uint32_t idx" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "idx": { - "index": "[Wreg1, imm1]" - }, - "pm": { - "register": "Preg3" - }, - "pn": { - "register": "Preg2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PSEL" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svpsel_lane_c8", - "arguments": [ - "svcount_t pn", - "svbool_t pm", - "uint32_t idx" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "idx": { - "index": "[Wreg1, imm1]" - }, - "pm": { - "register": "Preg3" - }, - "pn": { - "register": "Preg2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PSEL" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svptest_any", @@ -130298,74 +127135,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svptrue_c16", - "arguments": [], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": {}, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PTRUE" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svptrue_c32", - "arguments": [], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": {}, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PTRUE" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svptrue_c64", - "arguments": [], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": {}, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PTRUE" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svptrue_c8", - "arguments": [], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": {}, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "PTRUE" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svptrue_pat_b16", @@ -152948,32 +149717,6 @@ ] 
] }, - { - "SIMD_ISA": "SVE2", - "name": "svreinterpret[_b]", - "arguments": [ - "svcount_t count" - ], - "return_type": { - "value": "svbool_t" - }, - "Architectures": [ - "A64" - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svreinterpret[_c]", - "arguments": [ - "svbool_t pg" - ], - "return_type": { - "value": "svcount_t" - }, - "Architectures": [ - "A64" - ] - }, { "SIMD_ISA": "SVE", "name": "svreinterpret_f16[_f16]", @@ -166248,21 +162991,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svset2[_b]", - "arguments": [ - "svboolx2_t tuple", - "uint64_t imm_index", - "svbool_t x" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Architectures": [ - "A64" - ] - }, { "SIMD_ISA": "SVE", "name": "svset2[_f16]", @@ -166593,21 +163321,6 @@ "A64" ] }, - { - "SIMD_ISA": "SVE2", - "name": "svset4[_b]", - "arguments": [ - "svboolx4_t tuple", - "uint64_t imm_index", - "svbool_t x" - ], - "return_type": { - "value": "svboolx4_t" - }, - "Architectures": [ - "A64" - ] - }, { "SIMD_ISA": "SVE", "name": "svset4[_f16]", @@ -169853,68 +166566,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_f16_x2]", - "arguments": [ - "svcount_t png", - "float16_t * rn", - "svfloat16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_f16_x4]", - "arguments": [ - "svcount_t png", - "float16_t * rn", - "svfloat16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_f32]", @@ -169949,68 +166600,6 @@ ] ] }, - { - 
"SIMD_ISA": "SVE2", - "name": "svst1[_f32_x2]", - "arguments": [ - "svcount_t png", - "float32_t * rn", - "svfloat32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_f32_x4]", - "arguments": [ - "svcount_t png", - "float32_t * rn", - "svfloat32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_f64]", @@ -170045,68 +166634,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_f64_x2]", - "arguments": [ - "svcount_t png", - "float64_t * rn", - "svfloat64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_f64_x4]", - "arguments": [ - "svcount_t png", - "float64_t * rn", - "svfloat64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_s16]", @@ -170141,68 +166668,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_s16_x2]", - "arguments": [ - "svcount_t png", - "int16_t * rn", - "svint16x2_t zt" - ], - 
"return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_s16_x4]", - "arguments": [ - "svcount_t png", - "int16_t * rn", - "svint16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_s32]", @@ -170237,68 +166702,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_s32_x2]", - "arguments": [ - "svcount_t png", - "int32_t * rn", - "svint32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_s32_x4]", - "arguments": [ - "svcount_t png", - "int32_t * rn", - "svint32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_s64]", @@ -170333,68 +166736,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_s64_x2]", - "arguments": [ - "svcount_t png", - "int64_t * rn", - "svint64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - 
"Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_s64_x4]", - "arguments": [ - "svcount_t png", - "int64_t * rn", - "svint64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_s8]", @@ -170429,68 +166770,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_s8_x2]", - "arguments": [ - "svcount_t png", - "int8_t * rn", - "svint8x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B, Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_s8_x4]", - "arguments": [ - "svcount_t png", - "int8_t * rn", - "svint8x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B - Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_u16]", @@ -170525,68 +166804,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_u16_x2]", - "arguments": [ - "svcount_t png", - "uint16_t * rn", - "svuint16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - 
"name": "svst1[_u16_x4]", - "arguments": [ - "svcount_t png", - "uint16_t * rn", - "svuint16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_u32]", @@ -170621,68 +166838,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_u32_x2]", - "arguments": [ - "svcount_t png", - "uint32_t * rn", - "svuint32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_u32_x4]", - "arguments": [ - "svcount_t png", - "uint32_t * rn", - "svuint32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_u64]", @@ -170717,68 +166872,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_u64_x2]", - "arguments": [ - "svcount_t png", - "uint64_t * rn", - "svuint64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_u64_x4]", - "arguments": [ - "svcount_t png", - "uint64_t * rn", - "svuint64x4_t zt" - ], - "return_type": { - "value": "void" - }, - 
"Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1[_u8]", @@ -170813,68 +166906,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_u8_x2]", - "arguments": [ - "svcount_t png", - "uint8_t * rn", - "svuint8x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B, Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1[_u8_x4]", - "arguments": [ - "svcount_t png", - "uint8_t * rn", - "svuint8x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B - Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "ST1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_scatter[_u32base]_index[_f32]", @@ -172395,80 +168426,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_f16_x2]", - "arguments": [ - "svcount_t png", - "float16_t * rn", - "int64_t vnum", - "svfloat16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_f16_x4]", - "arguments": [ - "svcount_t png", - "float16_t * rn", - "int64_t vnum", - "svfloat16x4_t zt" - ], - "return_type": { - "value": "void" - }, - 
"Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_vnum[_f32]", @@ -172507,80 +168464,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_f32_x2]", - "arguments": [ - "svcount_t png", - "float32_t * rn", - "int64_t vnum", - "svfloat32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_f32_x4]", - "arguments": [ - "svcount_t png", - "float32_t * rn", - "int64_t vnum", - "svfloat32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_vnum[_f64]", @@ -172619,80 +168502,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_f64_x2]", - "arguments": [ - "svcount_t png", - "float64_t * rn", - "int64_t vnum", - "svfloat64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1D" - ] - ] - }, - { - 
"SIMD_ISA": "SVE2", - "name": "svst1_vnum[_f64_x4]", - "arguments": [ - "svcount_t png", - "float64_t * rn", - "int64_t vnum", - "svfloat64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_vnum[_s16]", @@ -172731,80 +168540,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_s16_x2]", - "arguments": [ - "svcount_t png", - "int16_t * rn", - "int64_t vnum", - "svint16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_s16_x4]", - "arguments": [ - "svcount_t png", - "int16_t * rn", - "int64_t vnum", - "svint16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_vnum[_s32]", @@ -172843,80 +168578,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_s32_x2]", - "arguments": [ - "svcount_t png", - "int32_t * rn", - "int64_t vnum", - "svint32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - 
"register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_s32_x4]", - "arguments": [ - "svcount_t png", - "int32_t * rn", - "int64_t vnum", - "svint32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_vnum[_s64]", @@ -172955,80 +168616,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_s64_x2]", - "arguments": [ - "svcount_t png", - "int64_t * rn", - "int64_t vnum", - "svint64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_s64_x4]", - "arguments": [ - "svcount_t png", - "int64_t * rn", - "int64_t vnum", - "svint64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_vnum[_s8]", @@ -173067,80 +168654,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_s8_x2]", - "arguments": [ - "svcount_t png", - "int8_t * rn", - "int64_t vnum", - 
"svint8x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B, Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_s8_x4]", - "arguments": [ - "svcount_t png", - "int8_t * rn", - "int64_t vnum", - "svint8x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B - Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_vnum[_u16]", @@ -173179,80 +168692,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_u16_x2]", - "arguments": [ - "svcount_t png", - "uint16_t * rn", - "int64_t vnum", - "svuint16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_u16_x4]", - "arguments": [ - "svcount_t png", - "uint16_t * rn", - "int64_t vnum", - "svuint16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": 
"svst1_vnum[_u32]", @@ -173291,80 +168730,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_u32_x2]", - "arguments": [ - "svcount_t png", - "uint32_t * rn", - "int64_t vnum", - "svuint32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_u32_x4]", - "arguments": [ - "svcount_t png", - "uint32_t * rn", - "int64_t vnum", - "svuint32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_vnum[_u64]", @@ -173403,80 +168768,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_u64_x2]", - "arguments": [ - "svcount_t png", - "uint64_t * rn", - "int64_t vnum", - "svuint64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_u64_x4]", - "arguments": [ - "svcount_t png", - "uint64_t * rn", - "int64_t vnum", - "svuint64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - 
"zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1_vnum[_u8]", @@ -173515,80 +168806,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_u8_x2]", - "arguments": [ - "svcount_t png", - "uint8_t * rn", - "int64_t vnum", - "svuint8x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B, Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svst1_vnum[_u8_x4]", - "arguments": [ - "svcount_t png", - "uint8_t * rn", - "int64_t vnum", - "svuint8x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B - Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "ST1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svst1b[_s16]", @@ -178901,68 +174118,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_f16_x2]", - "arguments": [ - "svcount_t png", - "float16_t * rn", - "svfloat16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_f16_x4]", - "arguments": [ - "svcount_t png", - "float16_t * rn", - "svfloat16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" 
- }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_f32]", @@ -178997,68 +174152,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_f32_x2]", - "arguments": [ - "svcount_t png", - "float32_t * rn", - "svfloat32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_f32_x4]", - "arguments": [ - "svcount_t png", - "float32_t * rn", - "svfloat32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_f64]", @@ -179093,68 +174186,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_f64_x2]", - "arguments": [ - "svcount_t png", - "float64_t * rn", - "svfloat64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_f64_x4]", - "arguments": [ - "svcount_t png", - "float64_t * rn", - "svfloat64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - 
"Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_s16]", @@ -179189,68 +174220,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_s16_x2]", - "arguments": [ - "svcount_t png", - "int16_t * rn", - "svint16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_s16_x4]", - "arguments": [ - "svcount_t png", - "int16_t * rn", - "svint16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_s32]", @@ -179285,68 +174254,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_s32_x2]", - "arguments": [ - "svcount_t png", - "int32_t * rn", - "svint32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_s32_x4]", - "arguments": [ - "svcount_t png", - "int32_t * rn", - "svint32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_s64]", @@ 
-179381,68 +174288,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_s64_x2]", - "arguments": [ - "svcount_t png", - "int64_t * rn", - "svint64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_s64_x4]", - "arguments": [ - "svcount_t png", - "int64_t * rn", - "svint64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_s8]", @@ -179477,68 +174322,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_s8_x2]", - "arguments": [ - "svcount_t png", - "int8_t * rn", - "svint8x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B, Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_s8_x4]", - "arguments": [ - "svcount_t png", - "int8_t * rn", - "svint8x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B - Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_u16]", @@ -179573,68 +174356,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_u16_x2]", - "arguments": [ - "svcount_t png", - "uint16_t * rn", - 
"svuint16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_u16_x4]", - "arguments": [ - "svcount_t png", - "uint16_t * rn", - "svuint16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_u32]", @@ -179669,68 +174390,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_u32_x2]", - "arguments": [ - "svcount_t png", - "uint32_t * rn", - "svuint32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_u32_x4]", - "arguments": [ - "svcount_t png", - "uint32_t * rn", - "svuint32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_u64]", @@ -179765,68 +174424,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_u64_x2]", - "arguments": [ - "svcount_t png", - "uint64_t * rn", - "svuint64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - 
}, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_u64_x4]", - "arguments": [ - "svcount_t png", - "uint64_t * rn", - "svuint64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1[_u8]", @@ -179861,68 +174458,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_u8_x2]", - "arguments": [ - "svcount_t png", - "uint8_t * rn", - "svuint8x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B, Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1[_u8_x4]", - "arguments": [ - "svcount_t png", - "uint8_t * rn", - "svuint8x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg1" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B - Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "STNT1B" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svstnt1_scatter[_u32base]_index[_f32]", @@ -181092,80 +175627,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_f16_x2]", - "arguments": [ - "svcount_t png", - "float16_t * rn", - "int64_t vnum", - "svfloat16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": 
{ - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_f16_x4]", - "arguments": [ - "svcount_t png", - "float16_t * rn", - "int64_t vnum", - "svfloat16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1_vnum[_f32]", @@ -181204,80 +175665,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_f32_x2]", - "arguments": [ - "svcount_t png", - "float32_t * rn", - "int64_t vnum", - "svfloat32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_f32_x4]", - "arguments": [ - "svcount_t png", - "float32_t * rn", - "int64_t vnum", - "svfloat32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1_vnum[_f64]", @@ -181316,80 +175703,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_f64_x2]", - "arguments": [ - "svcount_t png", - "float64_t * rn", - "int64_t vnum", - 
"svfloat64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_f64_x4]", - "arguments": [ - "svcount_t png", - "float64_t * rn", - "int64_t vnum", - "svfloat64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1_vnum[_s16]", @@ -181428,80 +175741,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_s16_x2]", - "arguments": [ - "svcount_t png", - "int16_t * rn", - "int64_t vnum", - "svint16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_s16_x4]", - "arguments": [ - "svcount_t png", - "int16_t * rn", - "int64_t vnum", - "svint16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1H" - ] - ] - }, { "SIMD_ISA": 
"SVE", "name": "svstnt1_vnum[_s32]", @@ -181540,80 +175779,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_s32_x2]", - "arguments": [ - "svcount_t png", - "int32_t * rn", - "int64_t vnum", - "svint32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_s32_x4]", - "arguments": [ - "svcount_t png", - "int32_t * rn", - "int64_t vnum", - "svint32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1_vnum[_s64]", @@ -181652,80 +175817,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_s64_x2]", - "arguments": [ - "svcount_t png", - "int64_t * rn", - "int64_t vnum", - "svint64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_s64_x4]", - "arguments": [ - "svcount_t png", - "int64_t * rn", - "int64_t vnum", - "svint64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - 
"register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1_vnum[_s8]", @@ -181764,80 +175855,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_s8_x2]", - "arguments": [ - "svcount_t png", - "int8_t * rn", - "int64_t vnum", - "svint8x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B, Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_s8_x4]", - "arguments": [ - "svcount_t png", - "int8_t * rn", - "int64_t vnum", - "svint8x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B - Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1B" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1_vnum[_u16]", @@ -181876,80 +175893,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_u16_x2]", - "arguments": [ - "svcount_t png", - "uint16_t * rn", - "int64_t vnum", - "svuint16x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H, Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1H" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_u16_x4]", - "arguments": [ - "svcount_t png", - "uint16_t * rn", - 
"int64_t vnum", - "svuint16x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.H - Zreg2.H }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1H" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1_vnum[_u32]", @@ -181988,80 +175931,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_u32_x2]", - "arguments": [ - "svcount_t png", - "uint32_t * rn", - "int64_t vnum", - "svuint32x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S, Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1W" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_u32_x4]", - "arguments": [ - "svcount_t png", - "uint32_t * rn", - "int64_t vnum", - "svuint32x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.S - Zreg2.S }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1W" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1_vnum[_u64]", @@ -182100,80 +175969,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_u64_x2]", - "arguments": [ - "svcount_t png", - "uint64_t * rn", - "int64_t vnum", - "svuint64x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D, Zreg2.D }" - } - }, - 
"Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1D" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_u64_x4]", - "arguments": [ - "svcount_t png", - "uint64_t * rn", - "int64_t vnum", - "svuint64x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.D - Zreg2.D }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1D" - ] - ] - }, { "SIMD_ISA": "SVE", "name": "svstnt1_vnum[_u8]", @@ -182212,80 +176007,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_u8_x2]", - "arguments": [ - "svcount_t png", - "uint8_t * rn", - "int64_t vnum", - "svuint8x2_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B, Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1B" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svstnt1_vnum[_u8_x4]", - "arguments": [ - "svcount_t png", - "uint8_t * rn", - "int64_t vnum", - "svuint8x4_t zt" - ], - "return_type": { - "value": "void" - }, - "Arguments_Preparation": { - "png": { - "register": "PNreg1" - }, - "rn": { - "register": "Xreg2" - }, - "vnum": { - "register": "Xreg3" - }, - "zt": { - "Z multi-vector": "{ Zreg1.B - Zreg2.B }" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "RDVL", - "MADD", - "STNT1B" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svstnt1b_scatter[_u32base]_offset[_s32]", @@ -193969,17 +187690,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svundef2_b", - "arguments": [], - "return_type": { - "value": "svboolx2_t" - }, - "Architectures": [ - "A64" - ] - }, { "SIMD_ISA": "SVE", "name": 
"svundef2_f16", @@ -194222,17 +187932,6 @@ "A64" ] }, - { - "SIMD_ISA": "SVE2", - "name": "svundef4_b", - "arguments": [], - "return_type": { - "value": "svboolx4_t" - }, - "Architectures": [ - "A64" - ] - }, { "SIMD_ISA": "SVE", "name": "svundef4_f16", @@ -197212,33 +190911,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_b16[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGE" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svwhilege_b16[_u32]", @@ -197293,33 +190965,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_b16[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHS" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svwhilege_b32[_s32]", @@ -197374,33 +191019,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_b32[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGE" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svwhilege_b32[_u32]", @@ -197455,33 +191073,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_b32[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHS" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": 
"svwhilege_b64[_s32]", @@ -197536,33 +191127,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_b64[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGE" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svwhilege_b64[_u32]", @@ -197617,33 +191181,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_b64[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHS" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svwhilege_b8[_s32]", @@ -197698,33 +191235,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_b8[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGE" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svwhilege_b8[_u32]", @@ -197781,175 +191291,20 @@ }, { "SIMD_ISA": "SVE2", - "name": "svwhilege_b8[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHS" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_c16[_s64]", - "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - 
}, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGE" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_c16[_u64]", - "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHS" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_c32[_s64]", - "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGE" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_c32[_u64]", - "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHS" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilege_c64[_s64]", + "name": "svwhilegt_b16[_s32]", "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" + "int32_t op1", + "int32_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Wop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -197957,30 +191312,26 @@ ], "instructions": [ [ - "WHILEGE" + "WHILEGT" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilege_c64[_u64]", + "name": "svwhilegt_b16[_s64]", "arguments": [ - "uint64_t rn", - 
"uint64_t rm", - "uint64_t vl" + "int64_t op1", + "int64_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -197988,30 +191339,26 @@ ], "instructions": [ [ - "WHILEHS" + "WHILEGT" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilege_c8[_s64]", + "name": "svwhilegt_b16[_u32]", "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" + "uint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Wop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -198019,30 +191366,26 @@ ], "instructions": [ [ - "WHILEGE" + "WHILEHI" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilege_c8[_u64]", + "name": "svwhilegt_b16[_u64]", "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" + "uint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -198050,175 +191393,13 @@ ], "instructions": [ [ - "WHILEHS" + "WHILEHI" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilegt_b16[_s32]", - "arguments": [ - "int32_t op1", - "int32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b16[_s64]", - "arguments": [ - "int64_t op1", - "int64_t op2" - ], - 
"return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b16[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b16[_u32]", - "arguments": [ - "uint32_t op1", - "uint32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b16[_u64]", - "arguments": [ - "uint64_t op1", - "uint64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b16[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b32[_s32]", + "name": "svwhilegt_b32[_s32]", "arguments": [ "int32_t op1", "int32_t op2" @@ -198270,33 +191451,6 @@ ] ] }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b32[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - 
"Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, { "SIMD_ISA": "SVE2", "name": "svwhilegt_b32[_u32]", @@ -198347,1246 +191501,26 @@ ], "instructions": [ [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b32[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b64[_s32]", - "arguments": [ - "int32_t op1", - "int32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b64[_s64]", - "arguments": [ - "int64_t op1", - "int64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b64[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b64[_u32]", - "arguments": [ - "uint32_t op1", - "uint32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - 
"Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b64[_u64]", - "arguments": [ - "uint64_t op1", - "uint64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b64[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b8[_s32]", - "arguments": [ - "int32_t op1", - "int32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b8[_s64]", - "arguments": [ - "int64_t op1", - "int64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b8[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b8[_u32]", - "arguments": [ - "uint32_t op1", - "uint32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, 
- "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b8[_u64]", - "arguments": [ - "uint64_t op1", - "uint64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_b8[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_c16[_s64]", - "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_c16[_u64]", - "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_c32[_s64]", - "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": 
"" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_c32[_u64]", - "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_c64[_s64]", - "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_c64[_u64]", - "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_c8[_s64]", - "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEGT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilegt_c8[_u64]", - "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": 
"" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILEHI" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b16[_s32]", - "arguments": [ - "int32_t op1", - "int32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b16[_s64]", - "arguments": [ - "int64_t op1", - "int64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilele_b16[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b16[_u32]", - "arguments": [ - "uint32_t op1", - "uint32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b16[_u64]", - "arguments": [ - "uint64_t op1", - "uint64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilele_b16[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": 
"svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b32[_s32]", - "arguments": [ - "int32_t op1", - "int32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b32[_s64]", - "arguments": [ - "int64_t op1", - "int64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilele_b32[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b32[_u32]", - "arguments": [ - "uint32_t op1", - "uint32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b32[_u64]", - "arguments": [ - "uint64_t op1", - "uint64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": 
"svwhilele_b32[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b64[_s32]", - "arguments": [ - "int32_t op1", - "int32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b64[_s64]", - "arguments": [ - "int64_t op1", - "int64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilele_b64[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b64[_u32]", - "arguments": [ - "uint32_t op1", - "uint32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b64[_u64]", - "arguments": [ - "uint64_t op1", - "uint64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - 
"Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilele_b64[_u64]_x2", - "arguments": [ - "uint64_t rn", - "uint64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b8[_s32]", - "arguments": [ - "int32_t op1", - "int32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b8[_s64]", - "arguments": [ - "int64_t op1", - "int64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilele_b8[_s64]_x2", - "arguments": [ - "int64_t rn", - "int64_t rm" - ], - "return_type": { - "value": "svboolx2_t" - }, - "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELE" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b8[_u32]", - "arguments": [ - "uint32_t op1", - "uint32_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - "Arguments_Preparation": { - "op1": { - "register": "Wop1" - }, - "op2": { - "register": "Wop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" - ] - ] - }, - { - "SIMD_ISA": "SVE", - "name": "svwhilele_b8[_u64]", - "arguments": [ - "uint64_t op1", - "uint64_t op2" - ], - "return_type": { - "value": "svbool_t" - }, - 
"Arguments_Preparation": { - "op1": { - "register": "Xop1" - }, - "op2": { - "register": "Xop2" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELS" + "WHILEHI" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilele_b8[_u64]_x2", + "name": "svwhilegt_b64[_s32]", "arguments": [ - "uint64_t rn", - "uint64_t rm" + "int32_t op1", + "int32_t op2" ], "return_type": { - "value": "svboolx2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" + "op1": { + "register": "Wop1" }, - "rn": { - "register": "Xreg1" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -199594,30 +191528,26 @@ ], "instructions": [ [ - "WHILELS" + "WHILEGT" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilele_c16[_s64]", + "name": "svwhilegt_b64[_s64]", "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" + "int64_t op1", + "int64_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -199625,30 +191555,26 @@ ], "instructions": [ [ - "WHILELE" + "WHILEGT" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilele_c16[_u64]", + "name": "svwhilegt_b64[_u32]", "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" + "uint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Wop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -199656,30 +191582,26 @@ ], "instructions": [ [ - "WHILELS" + "WHILEHI" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilele_c32[_s64]", + "name": "svwhilegt_b64[_u64]", "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" + "uint64_t op1", + "uint64_t op2" ], 
"return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -199687,30 +191609,26 @@ ], "instructions": [ [ - "WHILELE" + "WHILEHI" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilele_c32[_u64]", + "name": "svwhilegt_b8[_s32]", "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" + "int32_t op1", + "int32_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Wop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -199718,30 +191636,26 @@ ], "instructions": [ [ - "WHILELS" + "WHILEGT" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilele_c64[_s64]", + "name": "svwhilegt_b8[_s64]", "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" + "int64_t op1", + "int64_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -199749,30 +191663,26 @@ ], "instructions": [ [ - "WHILELE" + "WHILEGT" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilele_c64[_u64]", + "name": "svwhilegt_b8[_u32]", "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" + "uint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Wop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -199780,30 +191690,26 @@ ], "instructions": [ [ - "WHILELS" + 
"WHILEHI" ] ] }, { "SIMD_ISA": "SVE2", - "name": "svwhilele_c8[_s64]", + "name": "svwhilegt_b8[_u64]", "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" + "uint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -199811,30 +191717,26 @@ ], "instructions": [ [ - "WHILELE" + "WHILEHI" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilele_c8[_u64]", + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_s32]", "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" + "int32_t op1", + "int32_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Wop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -199842,26 +191744,26 @@ ], "instructions": [ [ - "WHILELS" + "WHILELE" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b16[_s32]", + "name": "svwhilele_b16[_s64]", "arguments": [ - "int32_t op1", - "int32_t op2" + "int64_t op1", + "int64_t op2" ], "return_type": { "value": "svbool_t" }, "Arguments_Preparation": { "op1": { - "register": "Wop1" + "register": "Xop1" }, "op2": { - "register": "Wop2" + "register": "Xop2" } }, "Architectures": [ @@ -199869,26 +191771,26 @@ ], "instructions": [ [ - "WHILELT" + "WHILELE" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b16[_s64]", + "name": "svwhilele_b16[_u32]", "arguments": [ - "int64_t op1", - "int64_t op2" + "uint32_t op1", + "uint32_t op2" ], "return_type": { "value": "svbool_t" }, "Arguments_Preparation": { "op1": { - "register": "Xop1" + "register": "Wop1" }, "op2": { - "register": "Xop2" + "register": "Wop2" } }, "Architectures": [ @@ -199896,26 +191798,26 @@ ], 
"instructions": [ [ - "WHILELT" + "WHILELS" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_b16[_s64]_x2", + "SIMD_ISA": "SVE", + "name": "svwhilele_b16[_u64]", "arguments": [ - "int64_t rn", - "int64_t rm" + "uint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "svboolx2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" + "op1": { + "register": "Xop1" }, - "rn": { - "register": "Xreg1" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -199923,16 +191825,16 @@ ], "instructions": [ [ - "WHILELT" + "WHILELS" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b16[_u32]", + "name": "svwhilele_b32[_s32]", "arguments": [ - "uint32_t op1", - "uint32_t op2" + "int32_t op1", + "int32_t op2" ], "return_type": { "value": "svbool_t" @@ -199950,16 +191852,16 @@ ], "instructions": [ [ - "WHILELO" + "WHILELE" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b16[_u64]", + "name": "svwhilele_b32[_s64]", "arguments": [ - "uint64_t op1", - "uint64_t op2" + "int64_t op1", + "int64_t op2" ], "return_type": { "value": "svbool_t" @@ -199977,26 +191879,26 @@ ], "instructions": [ [ - "WHILELO" + "WHILELE" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_b16[_u64]_x2", + "SIMD_ISA": "SVE", + "name": "svwhilele_b32[_u32]", "arguments": [ - "uint64_t rn", - "uint64_t rm" + "uint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "svboolx2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" + "op1": { + "register": "Wop1" }, - "rn": { - "register": "Xreg1" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -200004,26 +191906,26 @@ ], "instructions": [ [ - "WHILELO" + "WHILELS" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b32[_s32]", + "name": "svwhilele_b32[_u64]", "arguments": [ - "int32_t op1", - "int32_t op2" + "uint64_t op1", + "uint64_t op2" ], "return_type": { "value": "svbool_t" }, "Arguments_Preparation": { "op1": { - "register": "Wop1" + "register": "Xop1" }, "op2": { - 
"register": "Wop2" + "register": "Xop2" } }, "Architectures": [ @@ -200031,26 +191933,26 @@ ], "instructions": [ [ - "WHILELT" + "WHILELS" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b32[_s64]", + "name": "svwhilele_b64[_s32]", "arguments": [ - "int64_t op1", - "int64_t op2" + "int32_t op1", + "int32_t op2" ], "return_type": { "value": "svbool_t" }, "Arguments_Preparation": { "op1": { - "register": "Xop1" + "register": "Wop1" }, "op2": { - "register": "Xop2" + "register": "Wop2" } }, "Architectures": [ @@ -200058,26 +191960,26 @@ ], "instructions": [ [ - "WHILELT" + "WHILELE" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_b32[_s64]_x2", + "SIMD_ISA": "SVE", + "name": "svwhilele_b64[_s64]", "arguments": [ - "int64_t rn", - "int64_t rm" + "int64_t op1", + "int64_t op2" ], "return_type": { - "value": "svboolx2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" + "op1": { + "register": "Xop1" }, - "rn": { - "register": "Xreg1" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -200085,13 +191987,13 @@ ], "instructions": [ [ - "WHILELT" + "WHILELE" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b32[_u32]", + "name": "svwhilele_b64[_u32]", "arguments": [ "uint32_t op1", "uint32_t op2" @@ -200112,13 +192014,13 @@ ], "instructions": [ [ - "WHILELO" + "WHILELS" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b32[_u64]", + "name": "svwhilele_b64[_u64]", "arguments": [ "uint64_t op1", "uint64_t op2" @@ -200139,26 +192041,26 @@ ], "instructions": [ [ - "WHILELO" + "WHILELS" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_b32[_u64]_x2", + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_s32]", "arguments": [ - "uint64_t rn", - "uint64_t rm" + "int32_t op1", + "int32_t op2" ], "return_type": { - "value": "svboolx2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" + "op1": { + "register": "Wop1" }, - "rn": { - "register": "Xreg1" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ 
-200166,26 +192068,26 @@ ], "instructions": [ [ - "WHILELO" + "WHILELE" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b64[_s32]", + "name": "svwhilele_b8[_s64]", "arguments": [ - "int32_t op1", - "int32_t op2" + "int64_t op1", + "int64_t op2" ], "return_type": { "value": "svbool_t" }, "Arguments_Preparation": { "op1": { - "register": "Wop1" + "register": "Xop1" }, "op2": { - "register": "Wop2" + "register": "Xop2" } }, "Architectures": [ @@ -200193,26 +192095,26 @@ ], "instructions": [ [ - "WHILELT" + "WHILELE" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b64[_s64]", + "name": "svwhilele_b8[_u32]", "arguments": [ - "int64_t op1", - "int64_t op2" + "uint32_t op1", + "uint32_t op2" ], "return_type": { "value": "svbool_t" }, "Arguments_Preparation": { "op1": { - "register": "Xop1" + "register": "Wop1" }, "op2": { - "register": "Xop2" + "register": "Wop2" } }, "Architectures": [ @@ -200220,26 +192122,26 @@ ], "instructions": [ [ - "WHILELT" + "WHILELS" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_b64[_s64]_x2", + "SIMD_ISA": "SVE", + "name": "svwhilele_b8[_u64]", "arguments": [ - "int64_t rn", - "int64_t rm" + "uint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "svboolx2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" + "op1": { + "register": "Xop1" }, - "rn": { - "register": "Xreg1" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -200247,16 +192149,16 @@ ], "instructions": [ [ - "WHILELT" + "WHILELS" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b64[_u32]", + "name": "svwhilelt_b16[_s32]", "arguments": [ - "uint32_t op1", - "uint32_t op2" + "int32_t op1", + "int32_t op2" ], "return_type": { "value": "svbool_t" @@ -200274,16 +192176,16 @@ ], "instructions": [ [ - "WHILELO" + "WHILELT" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b64[_u64]", + "name": "svwhilelt_b16[_s64]", "arguments": [ - "uint64_t op1", - "uint64_t op2" + "int64_t op1", + "int64_t op2" ], "return_type": { "value": 
"svbool_t" @@ -200301,26 +192203,26 @@ ], "instructions": [ [ - "WHILELO" + "WHILELT" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_b64[_u64]_x2", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b16[_u32]", "arguments": [ - "uint64_t rn", - "uint64_t rm" + "uint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "svboolx2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" + "op1": { + "register": "Wop1" }, - "rn": { - "register": "Xreg1" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -200334,20 +192236,20 @@ }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b8[_s32]", + "name": "svwhilelt_b16[_u64]", "arguments": [ - "int32_t op1", - "int32_t op2" + "uint64_t op1", + "uint64_t op2" ], "return_type": { "value": "svbool_t" }, "Arguments_Preparation": { "op1": { - "register": "Wop1" + "register": "Xop1" }, "op2": { - "register": "Wop2" + "register": "Xop2" } }, "Architectures": [ @@ -200355,26 +192257,26 @@ ], "instructions": [ [ - "WHILELT" + "WHILELO" ] ] }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b8[_s64]", + "name": "svwhilelt_b32[_s32]", "arguments": [ - "int64_t op1", - "int64_t op2" + "int32_t op1", + "int32_t op2" ], "return_type": { "value": "svbool_t" }, "Arguments_Preparation": { "op1": { - "register": "Xop1" + "register": "Wop1" }, "op2": { - "register": "Xop2" + "register": "Wop2" } }, "Architectures": [ @@ -200387,21 +192289,21 @@ ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_b8[_s64]_x2", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b32[_s64]", "arguments": [ - "int64_t rn", - "int64_t rm" + "int64_t op1", + "int64_t op2" ], "return_type": { - "value": "svboolx2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" + "op1": { + "register": "Xop1" }, - "rn": { - "register": "Xreg1" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -200415,7 +192317,7 @@ }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b8[_u32]", + "name": "svwhilelt_b32[_u32]", "arguments": [ 
"uint32_t op1", "uint32_t op2" @@ -200442,7 +192344,7 @@ }, { "SIMD_ISA": "SVE", - "name": "svwhilelt_b8[_u64]", + "name": "svwhilelt_b32[_u64]", "arguments": [ "uint64_t op1", "uint64_t op2" @@ -200468,21 +192370,21 @@ ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_b8[_u64]_x2", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_s32]", "arguments": [ - "uint64_t rn", - "uint64_t rm" + "int32_t op1", + "int32_t op2" ], "return_type": { - "value": "svboolx2_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" + "op1": { + "register": "Wop1" }, - "rn": { - "register": "Xreg1" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -200490,30 +192392,26 @@ ], "instructions": [ [ - "WHILELO" + "WHILELT" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_c16[_s64]", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_s64]", "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" + "int64_t op1", + "int64_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -200526,25 +192424,21 @@ ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_c16[_u64]", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_u32]", "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" + "uint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Wop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -200557,56 +192451,21 @@ ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_c32[_s64]", - "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" - ], - "return_type": { - "value": "svcount_t" - }, - 
"Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" - }, - "vl": { - "immediate": "" - } - }, - "Architectures": [ - "A64" - ], - "instructions": [ - [ - "WHILELT" - ] - ] - }, - { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_c32[_u64]", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b64[_u64]", "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" + "uint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -200619,25 +192478,21 @@ ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_c64[_s64]", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_s32]", "arguments": [ - "int64_t rn", - "int64_t rm", - "uint64_t vl" + "int32_t op1", + "int32_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Wop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -200650,25 +192505,21 @@ ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_c64[_u64]", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_s64]", "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" + "int64_t op1", + "int64_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -200676,30 +192527,26 @@ ], "instructions": [ [ - "WHILELO" + "WHILELT" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_c8[_s64]", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_u32]", "arguments": [ - "int64_t rn", - "int64_t 
rm", - "uint64_t vl" + "uint32_t op1", + "uint32_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Wop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Wop2" } }, "Architectures": [ @@ -200707,30 +192554,26 @@ ], "instructions": [ [ - "WHILELT" + "WHILELO" ] ] }, { - "SIMD_ISA": "SVE2", - "name": "svwhilelt_c8[_u64]", + "SIMD_ISA": "SVE", + "name": "svwhilelt_b8[_u64]", "arguments": [ - "uint64_t rn", - "uint64_t rm", - "uint64_t vl" + "uint64_t op1", + "uint64_t op2" ], "return_type": { - "value": "svcount_t" + "value": "svbool_t" }, "Arguments_Preparation": { - "rm": { - "register": "Xreg2" - }, - "rn": { - "register": "Xreg1" + "op1": { + "register": "Xop1" }, - "vl": { - "immediate": "" + "op2": { + "register": "Xop2" } }, "Architectures": [ @@ -224064,6 +215907,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtad_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtad_s64_f64", @@ -224087,6 +215953,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtad_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtad_u64_f64", @@ -224413,6 +216302,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtas_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": 
"vcvtas_u32_f32", @@ -224436,6 +216348,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtas_u64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTAU" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtd_f64_s64", @@ -224594,6 +216529,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtd_s64_f64", @@ -224617,6 +216575,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtd_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtd_u64_f64", @@ -225400,6 +217381,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmd_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtmd_s64_f64", @@ -225423,6 +217427,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtmd_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtmd_u64_f64", @@ -225749,6 +217776,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtms_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + 
"value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtms_u32_f32", @@ -225772,6 +217822,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtms_u64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTMU" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtn_s16_f16", @@ -225914,6 +217987,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnd_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtnd_s64_f64", @@ -225937,6 +218033,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtnd_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtnd_u64_f64", @@ -226263,6 +218382,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtns_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtns_u32_f32", @@ -226286,6 +218428,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtns_u64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTNU" + ] 
+ ] + }, { "SIMD_ISA": "Neon", "name": "vcvtp_s16_f16", @@ -226428,6 +218593,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpd_s32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "int32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtpd_s64_f64", @@ -226451,6 +218639,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtpd_u32_f64", + "arguments": [ + "float64_t a" + ], + "return_type": { + "value": "uint32_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Dn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtpd_u64_f64", @@ -226777,6 +218988,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtps_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtps_u32_f32", @@ -226800,6 +219034,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvtps_u64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTPU" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtq_f16_s16", @@ -227617,6 +219874,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_s64_f32", + "arguments": [ + "float32_t a" + ], + "return_type": { + "value": "int64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZS" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvts_u32_f32", @@ -227640,6 +219920,29 @@ ] ] }, + { + "SIMD_ISA": "Neon", + "name": "vcvts_u64_f32", + "arguments": [ + 
"float32_t a" + ], + "return_type": { + "value": "uint64_t" + }, + "Arguments_Preparation": { + "a": { + "register": "Sn" + } + }, + "Architectures": [ + "A64" + ], + "instructions": [ + [ + "FCVTZU" + ] + ] + }, { "SIMD_ISA": "Neon", "name": "vcvtx_f32_f64", @@ -228005,6 +220308,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -228041,6 +220345,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -228215,6 +220520,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -228251,6 +220557,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -318819,6 +311126,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -318855,6 +311163,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -318891,6 +311200,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -322606,6 +314916,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ @@ -322711,6 +315022,7 @@ } }, "Architectures": [ + "A32", "A64" ], "instructions": [ From 20c15afc6d927cc1c8a0535aae12bb67d47f61fd Mon Sep 17 00:00:00 2001 From: Adam Gemmell Date: Mon, 30 Mar 2026 12:16:49 +0100 Subject: [PATCH 06/30] Fix duplicate const specifier --- library/stdarch/crates/intrinsic-test/src/arm/types.rs | 3 +-- library/stdarch/crates/intrinsic-test/src/common/argument.rs | 3 +-- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/library/stdarch/crates/intrinsic-test/src/arm/types.rs b/library/stdarch/crates/intrinsic-test/src/arm/types.rs index 4be8d1e48b49a..18468bd5581f8 100644 --- a/library/stdarch/crates/intrinsic-test/src/arm/types.rs +++ b/library/stdarch/crates/intrinsic-test/src/arm/types.rs @@ -7,11 +7,10 @@ impl IntrinsicTypeDefinition for ArmIntrinsicType { /// Gets a string containing the typename for this type in C format. 
fn c_type(&self) -> String { let prefix = self.kind.c_prefix(); - let const_prefix = if self.constant { "const " } else { "" }; if let (Some(bit_len), simd_len, vec_len) = (self.bit_len, self.simd_len, self.vec_len) { match (simd_len, vec_len) { - (None, None) => format!("{const_prefix}{prefix}{bit_len}_t"), + (None, None) => format!("{prefix}{bit_len}_t"), (Some(simd), None) => format!("{prefix}{bit_len}x{simd}_t"), (Some(simd), Some(vec)) => format!("{prefix}{bit_len}x{simd}x{vec}_t"), (None, Some(_)) => todo!("{self:#?}"), // Likely an invalid case diff --git a/library/stdarch/crates/intrinsic-test/src/common/argument.rs b/library/stdarch/crates/intrinsic-test/src/common/argument.rs index 385cf32d3bff9..3c4a07c51a0d2 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/argument.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/argument.rs @@ -30,8 +30,7 @@ where } pub fn to_c_type(&self) -> String { - let prefix = if self.ty.constant { "const " } else { "" }; - format!("{prefix}{}", self.ty.c_type()) + self.ty.c_type() } pub fn generate_name(&self) -> String { From 542aeceb39fd128642478c441582e0d9f646dbe6 Mon Sep 17 00:00:00 2001 From: Adam Gemmell Date: Wed, 1 Apr 2026 11:41:12 +0100 Subject: [PATCH 07/30] Detect and fix lane-swapping on big endian vectors --- .../src/arm_shared/neon/generated.rs | 84 ------------------- .../intrinsic-test/src/common/argument.rs | 10 ++- .../crates/intrinsic-test/src/common/gen_c.rs | 7 ++ .../intrinsic-test/src/common/gen_rust.rs | 6 ++ .../spec/neon/arm_shared.spec.yml | 1 + 5 files changed, 20 insertions(+), 88 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index cf4d10162ec9a..106c814749c71 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -33834,7 +33834,6 @@ pub fn vpadd_s32(a: int32x2_t, b: 
int32x2_t) -> int32x2_t { #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] @@ -33854,36 +33853,8 @@ pub fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { unsafe { transmute(vpadd_s8(transmute(a), transmute(b))) } } #[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { - let ret_val: uint8x8_t = transmute(vpadd_s8(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) - } -} -#[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] @@ -33903,36 +33874,8 @@ pub fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> 
uint16x4_t { unsafe { transmute(vpadd_s16(transmute(a), transmute(b))) } } #[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: uint16x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) }; - unsafe { - let ret_val: uint16x4_t = transmute(vpadd_s16(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) - } -} -#[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] #[inline(always)] -#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] @@ -33951,33 +33894,6 @@ pub fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { pub fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { unsafe { transmute(vpadd_s32(transmute(a), transmute(b))) } } -#[doc = "Add pairwise."] -#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] -#[inline(always)] -#[cfg(target_endian = "big")] -#[target_feature(enable = "neon")] -#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] -#[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vpadd))] -#[cfg_attr( - all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), - assert_instr(addp) -)] -#[cfg_attr( - not(target_arch = "arm"), - stable(feature = "neon_intrinsics", since = "1.59.0") -)] -#[cfg_attr( - target_arch = "arm", - unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") -)] -pub fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: uint32x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) }; - unsafe { - let ret_val: uint32x2_t = transmute(vpadd_s32(transmute(a), transmute(b))); - simd_shuffle!(ret_val, ret_val, [1, 0]) - } -} #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] #[inline(always)] diff --git a/library/stdarch/crates/intrinsic-test/src/common/argument.rs b/library/stdarch/crates/intrinsic-test/src/common/argument.rs index 3c4a07c51a0d2..413d5314d3180 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/argument.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/argument.rs @@ -175,9 +175,10 @@ where pub fn load_values_c(&self, indentation: Indentation) -> String { self.iter() .filter(|&arg| !arg.has_constraint()) - .map(|arg| { + .enumerate() + .map(|(idx, arg)| { format!( - "{indentation}{ty} {name} = cast<{ty}>({load}(&{name}_vals[i]));\n", + "{indentation}{ty} {name} = cast<{ty}>({load}(&{name}_vals[i+{idx}]));\n", ty = arg.to_c_type(), name = arg.generate_name(), load = if arg.is_simd() { @@ -196,7 +197,8 @@ where pub fn load_values_rust(&self, indentation: Indentation) -> String { self.iter() .filter(|&arg| !arg.has_constraint()) - .map(|arg| { + .enumerate() + .map(|(idx, arg)| { let load = if arg.is_simd() { arg.ty.get_load_function(Language::Rust) } else { @@ -204,7 +206,7 @@ where }; let typecast = if load.len() > 2 { "as _" } else { "" }; format!( - "{indentation}let {name} = 
{load}({vals_name}.as_ptr().offset(i){typecast});\n", + "{indentation}let {name} = {load}({vals_name}.as_ptr().offset(i+{idx}){typecast});\n", name = arg.generate_name(), vals_name = arg.rust_vals_array_name(), ) diff --git a/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs b/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs index a95b4c36b7bf0..12b5a600d5aef 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/gen_c.rs @@ -25,6 +25,13 @@ pub fn generate_c_test_loop( passes: u32, ) -> std::io::Result<()> { let body_indentation = indentation.nested(); + // Successive arguments are offset increasingly from their value array start + let passes = passes + 1 + - intrinsic + .arguments + .iter() + .filter(|&arg| !arg.has_constraint()) + .count() as u32; writeln!( w, "{indentation}for (int i=0; i<{passes}; i++) {{\n\ diff --git a/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs b/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs index 82b97701bb14a..5a0bc7b4d4b97 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/gen_rust.rs @@ -214,6 +214,12 @@ pub fn generate_rust_test_loop( passes: u32, ) -> std::io::Result<()> { let intrinsic_name = &intrinsic.name; + let passes = passes + 1 + - intrinsic + .arguments + .iter() + .filter(|&arg| !arg.has_constraint()) + .count() as u32; // Each function (and each specialization) has its own type. Erase that type with a cast. 
let mut coerce = String::from("unsafe fn("); diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index f6ef7f17d73b7..7902381fa2b82 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -13298,6 +13298,7 @@ intrinsics: doc: "Add pairwise." arguments: ["a: {neon_type[0]}", "b: {neon_type[0]}"] return_type: "{neon_type[0]}" + big_endian_inverse: false attr: - *neon-v7 - FnCall: [cfg_attr, [*test-is-arm, {FnCall: [assert_instr, [vpadd]]}]] From f24a634ba6e4d854c2bd36841f8d0e05196abe54 Mon Sep 17 00:00:00 2001 From: Adam Gemmell Date: Wed, 1 Apr 2026 12:02:57 +0100 Subject: [PATCH 08/30] Add sanity check that intrinsic output isn't empty --- library/stdarch/crates/intrinsic-test/src/common/compare.rs | 2 ++ 1 file changed, 2 insertions(+) diff --git a/library/stdarch/crates/intrinsic-test/src/common/compare.rs b/library/stdarch/crates/intrinsic-test/src/common/compare.rs index c22d7fd4ec0aa..c1438d1bbf8ce 100644 --- a/library/stdarch/crates/intrinsic-test/src/common/compare.rs +++ b/library/stdarch/crates/intrinsic-test/src/common/compare.rs @@ -78,6 +78,8 @@ pub fn compare_outputs( .filter_map(|output| output.trim().split_once("\n")) .collect::>(); + assert!(!c_output_map.is_empty(), "No C intrinsic output found!"); + let intrinsics = c_output_map .keys() .chain(rust_output_map.keys()) From ea746a2d2a256d36992f718e7473f5b7a96eebf8 Mon Sep 17 00:00:00 2001 From: Adam Gemmell Date: Thu, 2 Apr 2026 17:32:29 +0100 Subject: [PATCH 09/30] Add round trip tests for v{ld,st}{2,3,4} & v{ld,st}{2,3,4} lane intrinsics --- library/stdarch/.github/workflows/main.yml | 1 + .../core_arch/src/aarch64/neon/generated.rs | 4 +- .../crates/core_arch/src/aarch64/neon/mod.rs | 205 +++++++++++------- .../src/arm_shared/neon/generated.rs | 2 +- .../src/arm_shared/neon/load_tests.rs | 18 ++ 
.../core_arch/src/arm_shared/neon/mod.rs | 129 ++++++++++- .../src/arm_shared/neon/store_tests.rs | 38 ++++ .../spec/neon/aarch64.spec.yml | 4 +- .../spec/neon/arm_shared.spec.yml | 2 +- 9 files changed, 317 insertions(+), 86 deletions(-) diff --git a/library/stdarch/.github/workflows/main.yml b/library/stdarch/.github/workflows/main.yml index 1f598f6e20d4c..0bbd772b0e8c0 100644 --- a/library/stdarch/.github/workflows/main.yml +++ b/library/stdarch/.github/workflows/main.yml @@ -348,6 +348,7 @@ jobs: - name: Run miri tests env: TARGET: "aarch64-unknown-linux-gnu" + RUSTFLAGS: "-Ctarget-cpu=neoverse-v3" run: | # read filters and join them with a space. FILTERS=$(cat aarch64-miri-tests.txt | tr '\n' ' ') diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index c9ce7a69a6578..d749ccbe6671b 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -12189,7 +12189,7 @@ pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> int8x16x3_t { - static_assert_uimm_bits!(LANE, 3); + static_assert_uimm_bits!(LANE, 4); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), @@ -12571,7 +12571,7 @@ pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - #[rustc_legacy_const_generics(2)] #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> int8x16x4_t { - static_assert_uimm_bits!(LANE, 3); + static_assert_uimm_bits!(LANE, 4); unsafe extern "unadjusted" { #[cfg_attr( any(target_arch = "aarch64", target_arch = "arm64ec"), diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs 
b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs index 29a278b80df84..cab36b9b4baf7 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/mod.rs @@ -1037,6 +1037,7 @@ mod tests { macro_rules! wide_store_load_roundtrip_fp16 { ($( $name:ident $args:tt);* $(;)?) => { $( + #[cfg_attr(miri, ignore)] #[simd_test(enable = "neon,fp16")] #[cfg(not(target_arch = "arm64ec"))] unsafe fn $name() { @@ -1055,13 +1056,13 @@ mod tests { test_vld1q_f16_x3(f16, 24, float16x8x3_t, vst1q_f16_x3, vld1q_f16_x3); test_vld1q_f16_x4(f16, 32, float16x8x4_t, vst1q_f16_x4, vld1q_f16_x4); - test_vld2_f16_x2(f16, 8, float16x4x2_t, vst2_f16, vld2_f16); - test_vld2_f16_x3(f16, 12, float16x4x3_t, vst3_f16, vld3_f16); - test_vld2_f16_x4(f16, 16, float16x4x4_t, vst4_f16, vld4_f16); + test_vld2_f16(f16, 8, float16x4x2_t, vst2_f16, vld2_f16); + test_vld3_f16(f16, 12, float16x4x3_t, vst3_f16, vld3_f16); + test_vld4_f16(f16, 16, float16x4x4_t, vst4_f16, vld4_f16); - test_vld2q_f16_x2(f16, 16, float16x8x2_t, vst2q_f16, vld2q_f16); - test_vld3q_f16_x3(f16, 24, float16x8x3_t, vst3q_f16, vld3q_f16); - test_vld4q_f16_x4(f16, 32, float16x8x4_t, vst4q_f16, vld4q_f16); + test_vld2q_f16(f16, 16, float16x8x2_t, vst2q_f16, vld2q_f16); + test_vld3q_f16(f16, 24, float16x8x3_t, vst3q_f16, vld3q_f16); + test_vld4q_f16(f16, 32, float16x8x4_t, vst4q_f16, vld4q_f16); } macro_rules! wide_store_load_roundtrip_aes { @@ -1195,101 +1196,149 @@ mod tests { } wide_store_load_roundtrip_neon! 
{ - test_vld2_f32_x2(f32, 4, float32x2x2_t, vst2_f32, vld2_f32); - test_vld2_f32_x3(f32, 6, float32x2x3_t, vst3_f32, vld3_f32); - test_vld2_f32_x4(f32, 8, float32x2x4_t, vst4_f32, vld4_f32); + test_vld2_f32(f32, 4, float32x2x2_t, vst2_f32, vld2_f32); + test_vld3_f32(f32, 6, float32x2x3_t, vst3_f32, vld3_f32); + test_vld4_f32(f32, 8, float32x2x4_t, vst4_f32, vld4_f32); - test_vld2q_f32_x2(f32, 8, float32x4x2_t, vst2q_f32, vld2q_f32); - test_vld3q_f32_x3(f32, 12, float32x4x3_t, vst3q_f32, vld3q_f32); - test_vld4q_f32_x4(f32, 16, float32x4x4_t, vst4q_f32, vld4q_f32); + test_vld2q_f32(f32, 8, float32x4x2_t, vst2q_f32, vld2q_f32); + test_vld3q_f32(f32, 12, float32x4x3_t, vst3q_f32, vld3q_f32); + test_vld4q_f32(f32, 16, float32x4x4_t, vst4q_f32, vld4q_f32); - test_vld2_f64_x2(f64, 2, float64x1x2_t, vst2_f64, vld2_f64); - test_vld2_f64_x3(f64, 3, float64x1x3_t, vst3_f64, vld3_f64); - test_vld2_f64_x4(f64, 4, float64x1x4_t, vst4_f64, vld4_f64); + test_vld2_f64(f64, 2, float64x1x2_t, vst2_f64, vld2_f64); + test_vld3_f64(f64, 3, float64x1x3_t, vst3_f64, vld3_f64); + test_vld4_f64(f64, 4, float64x1x4_t, vst4_f64, vld4_f64); - test_vld2q_f64_x2(f64, 4, float64x2x2_t, vst2q_f64, vld2q_f64); - test_vld3q_f64_x3(f64, 6, float64x2x3_t, vst3q_f64, vld3q_f64); - test_vld4q_f64_x4(f64, 8, float64x2x4_t, vst4q_f64, vld4q_f64); + test_vld2q_f64(f64, 4, float64x2x2_t, vst2q_f64, vld2q_f64); + test_vld3q_f64(f64, 6, float64x2x3_t, vst3q_f64, vld3q_f64); + test_vld4q_f64(f64, 8, float64x2x4_t, vst4q_f64, vld4q_f64); - test_vld2_s8_x2(i8, 16, int8x8x2_t, vst2_s8, vld2_s8); - test_vld2_s8_x3(i8, 24, int8x8x3_t, vst3_s8, vld3_s8); - test_vld2_s8_x4(i8, 32, int8x8x4_t, vst4_s8, vld4_s8); + test_vld2_s8(i8, 16, int8x8x2_t, vst2_s8, vld2_s8); + test_vld3_s8(i8, 24, int8x8x3_t, vst3_s8, vld3_s8); + test_vld4_s8(i8, 32, int8x8x4_t, vst4_s8, vld4_s8); - test_vld2q_s8_x2(i8, 32, int8x16x2_t, vst2q_s8, vld2q_s8); - test_vld3q_s8_x3(i8, 48, int8x16x3_t, vst3q_s8, vld3q_s8); - test_vld4q_s8_x4(i8, 64, 
int8x16x4_t, vst4q_s8, vld4q_s8); + test_vld2q_s8(i8, 32, int8x16x2_t, vst2q_s8, vld2q_s8); + test_vld3q_s8(i8, 48, int8x16x3_t, vst3q_s8, vld3q_s8); + test_vld4q_s8(i8, 64, int8x16x4_t, vst4q_s8, vld4q_s8); - test_vld2_s16_x2(i16, 8, int16x4x2_t, vst2_s16, vld2_s16); - test_vld2_s16_x3(i16, 12, int16x4x3_t, vst3_s16, vld3_s16); - test_vld2_s16_x4(i16, 16, int16x4x4_t, vst4_s16, vld4_s16); + test_vld2_s16(i16, 8, int16x4x2_t, vst2_s16, vld2_s16); + test_vld3_s16(i16, 12, int16x4x3_t, vst3_s16, vld3_s16); + test_vld4_s16(i16, 16, int16x4x4_t, vst4_s16, vld4_s16); - test_vld2q_s16_x2(i16, 16, int16x8x2_t, vst2q_s16, vld2q_s16); - test_vld3q_s16_x3(i16, 24, int16x8x3_t, vst3q_s16, vld3q_s16); - test_vld4q_s16_x4(i16, 32, int16x8x4_t, vst4q_s16, vld4q_s16); + test_vld2q_s16(i16, 16, int16x8x2_t, vst2q_s16, vld2q_s16); + test_vld3q_s16(i16, 24, int16x8x3_t, vst3q_s16, vld3q_s16); + test_vld4q_s16(i16, 32, int16x8x4_t, vst4q_s16, vld4q_s16); - test_vld2_s32_x2(i32, 4, int32x2x2_t, vst2_s32, vld2_s32); - test_vld2_s32_x3(i32, 6, int32x2x3_t, vst3_s32, vld3_s32); - test_vld2_s32_x4(i32, 8, int32x2x4_t, vst4_s32, vld4_s32); + test_vld2_s32(i32, 4, int32x2x2_t, vst2_s32, vld2_s32); + test_vld3_s32(i32, 6, int32x2x3_t, vst3_s32, vld3_s32); + test_vld4_s32(i32, 8, int32x2x4_t, vst4_s32, vld4_s32); - test_vld2q_s32_x2(i32, 8, int32x4x2_t, vst2q_s32, vld2q_s32); - test_vld3q_s32_x3(i32, 12, int32x4x3_t, vst3q_s32, vld3q_s32); - test_vld4q_s32_x4(i32, 16, int32x4x4_t, vst4q_s32, vld4q_s32); + test_vld2q_s32(i32, 8, int32x4x2_t, vst2q_s32, vld2q_s32); + test_vld3q_s32(i32, 12, int32x4x3_t, vst3q_s32, vld3q_s32); + test_vld4q_s32(i32, 16, int32x4x4_t, vst4q_s32, vld4q_s32); - test_vld2_s64_x2(i64, 2, int64x1x2_t, vst2_s64, vld2_s64); - test_vld2_s64_x3(i64, 3, int64x1x3_t, vst3_s64, vld3_s64); - test_vld2_s64_x4(i64, 4, int64x1x4_t, vst4_s64, vld4_s64); + test_vld2_s64(i64, 2, int64x1x2_t, vst2_s64, vld2_s64); + test_vld3_s64(i64, 3, int64x1x3_t, vst3_s64, vld3_s64); + 
test_vld4_s64(i64, 4, int64x1x4_t, vst4_s64, vld4_s64); - test_vld2q_s64_x2(i64, 4, int64x2x2_t, vst2q_s64, vld2q_s64); - test_vld3q_s64_x3(i64, 6, int64x2x3_t, vst3q_s64, vld3q_s64); - test_vld4q_s64_x4(i64, 8, int64x2x4_t, vst4q_s64, vld4q_s64); + test_vld2q_s64(i64, 4, int64x2x2_t, vst2q_s64, vld2q_s64); + test_vld3q_s64(i64, 6, int64x2x3_t, vst3q_s64, vld3q_s64); + test_vld4q_s64(i64, 8, int64x2x4_t, vst4q_s64, vld4q_s64); - test_vld2_u8_x2(u8, 16, uint8x8x2_t, vst2_u8, vld2_u8); - test_vld2_u8_x3(u8, 24, uint8x8x3_t, vst3_u8, vld3_u8); - test_vld2_u8_x4(u8, 32, uint8x8x4_t, vst4_u8, vld4_u8); + test_vld2_u8(u8, 16, uint8x8x2_t, vst2_u8, vld2_u8); + test_vld3_u8(u8, 24, uint8x8x3_t, vst3_u8, vld3_u8); + test_vld4_u8(u8, 32, uint8x8x4_t, vst4_u8, vld4_u8); - test_vld2q_u8_x2(u8, 32, uint8x16x2_t, vst2q_u8, vld2q_u8); - test_vld3q_u8_x3(u8, 48, uint8x16x3_t, vst3q_u8, vld3q_u8); - test_vld4q_u8_x4(u8, 64, uint8x16x4_t, vst4q_u8, vld4q_u8); + test_vld2q_u8(u8, 32, uint8x16x2_t, vst2q_u8, vld2q_u8); + test_vld3q_u8(u8, 48, uint8x16x3_t, vst3q_u8, vld3q_u8); + test_vld4q_u8(u8, 64, uint8x16x4_t, vst4q_u8, vld4q_u8); - test_vld2_u16_x2(u16, 8, uint16x4x2_t, vst2_u16, vld2_u16); - test_vld2_u16_x3(u16, 12, uint16x4x3_t, vst3_u16, vld3_u16); - test_vld2_u16_x4(u16, 16, uint16x4x4_t, vst4_u16, vld4_u16); + test_vld2_u16(u16, 8, uint16x4x2_t, vst2_u16, vld2_u16); + test_vld3_u16(u16, 12, uint16x4x3_t, vst3_u16, vld3_u16); + test_vld4_u16(u16, 16, uint16x4x4_t, vst4_u16, vld4_u16); - test_vld2q_u16_x2(u16, 16, uint16x8x2_t, vst2q_u16, vld2q_u16); - test_vld3q_u16_x3(u16, 24, uint16x8x3_t, vst3q_u16, vld3q_u16); - test_vld4q_u16_x4(u16, 32, uint16x8x4_t, vst4q_u16, vld4q_u16); + test_vld2q_u16(u16, 16, uint16x8x2_t, vst2q_u16, vld2q_u16); + test_vld3q_u16(u16, 24, uint16x8x3_t, vst3q_u16, vld3q_u16); + test_vld4q_u16(u16, 32, uint16x8x4_t, vst4q_u16, vld4q_u16); - test_vld2_u32_x2(u32, 4, uint32x2x2_t, vst2_u32, vld2_u32); - test_vld2_u32_x3(u32, 6, uint32x2x3_t, vst3_u32, 
vld3_u32); - test_vld2_u32_x4(u32, 8, uint32x2x4_t, vst4_u32, vld4_u32); + test_vld2_u32(u32, 4, uint32x2x2_t, vst2_u32, vld2_u32); + test_vld3_u32(u32, 6, uint32x2x3_t, vst3_u32, vld3_u32); + test_vld4_u32(u32, 8, uint32x2x4_t, vst4_u32, vld4_u32); - test_vld2q_u32_x2(u32, 8, uint32x4x2_t, vst2q_u32, vld2q_u32); - test_vld3q_u32_x3(u32, 12, uint32x4x3_t, vst3q_u32, vld3q_u32); - test_vld4q_u32_x4(u32, 16, uint32x4x4_t, vst4q_u32, vld4q_u32); + test_vld2q_u32(u32, 8, uint32x4x2_t, vst2q_u32, vld2q_u32); + test_vld3q_u32(u32, 12, uint32x4x3_t, vst3q_u32, vld3q_u32); + test_vld4q_u32(u32, 16, uint32x4x4_t, vst4q_u32, vld4q_u32); - test_vld2_u64_x2(u64, 2, uint64x1x2_t, vst2_u64, vld2_u64); - test_vld2_u64_x3(u64, 3, uint64x1x3_t, vst3_u64, vld3_u64); - test_vld2_u64_x4(u64, 4, uint64x1x4_t, vst4_u64, vld4_u64); + test_vld2_u64(u64, 2, uint64x1x2_t, vst2_u64, vld2_u64); + test_vld3_u64(u64, 3, uint64x1x3_t, vst3_u64, vld3_u64); + test_vld4_u64(u64, 4, uint64x1x4_t, vst4_u64, vld4_u64); - test_vld2q_u64_x2(u64, 4, uint64x2x2_t, vst2q_u64, vld2q_u64); - test_vld3q_u64_x3(u64, 6, uint64x2x3_t, vst3q_u64, vld3q_u64); - test_vld4q_u64_x4(u64, 8, uint64x2x4_t, vst4q_u64, vld4q_u64); + test_vld2q_u64(u64, 4, uint64x2x2_t, vst2q_u64, vld2q_u64); + test_vld3q_u64(u64, 6, uint64x2x3_t, vst3q_u64, vld3q_u64); + test_vld4q_u64(u64, 8, uint64x2x4_t, vst4q_u64, vld4q_u64); - test_vld2_p8_x2(p8, 16, poly8x8x2_t, vst2_p8, vld2_p8); - test_vld2_p8_x3(p8, 24, poly8x8x3_t, vst3_p8, vld3_p8); - test_vld2_p8_x4(p8, 32, poly8x8x4_t, vst4_p8, vld4_p8); + test_vld2_p8(p8, 16, poly8x8x2_t, vst2_p8, vld2_p8); + test_vld3_p8(p8, 24, poly8x8x3_t, vst3_p8, vld3_p8); + test_vld4_p8(p8, 32, poly8x8x4_t, vst4_p8, vld4_p8); - test_vld2q_p8_x2(p8, 32, poly8x16x2_t, vst2q_p8, vld2q_p8); - test_vld3q_p8_x3(p8, 48, poly8x16x3_t, vst3q_p8, vld3q_p8); - test_vld4q_p8_x4(p8, 64, poly8x16x4_t, vst4q_p8, vld4q_p8); + test_vld2q_p8(p8, 32, poly8x16x2_t, vst2q_p8, vld2q_p8); + test_vld3q_p8(p8, 48, 
poly8x16x3_t, vst3q_p8, vld3q_p8); + test_vld4q_p8(p8, 64, poly8x16x4_t, vst4q_p8, vld4q_p8); - test_vld2_p16_x2(p16, 8, poly16x4x2_t, vst2_p16, vld2_p16); - test_vld2_p16_x3(p16, 12, poly16x4x3_t, vst3_p16, vld3_p16); - test_vld2_p16_x4(p16, 16, poly16x4x4_t, vst4_p16, vld4_p16); + test_vld2_p16(p16, 8, poly16x4x2_t, vst2_p16, vld2_p16); + test_vld3_p16(p16, 12, poly16x4x3_t, vst3_p16, vld3_p16); + test_vld4_p16(p16, 16, poly16x4x4_t, vst4_p16, vld4_p16); - test_vld2q_p16_x2(p16, 16, poly16x8x2_t, vst2q_p16, vld2q_p16); - test_vld3q_p16_x3(p16, 24, poly16x8x3_t, vst3q_p16, vld3q_p16); - test_vld4q_p16_x4(p16, 32, poly16x8x4_t, vst4q_p16, vld4q_p16); + test_vld2q_p16(p16, 16, poly16x8x2_t, vst2q_p16, vld2q_p16); + test_vld3q_p16(p16, 24, poly16x8x3_t, vst3q_p16, vld3q_p16); + test_vld4q_p16(p16, 32, poly16x8x4_t, vst4q_p16, vld4q_p16); + } + + macro_rules! lane_wide_store_load_roundtrip { + ($elem_ty:ty, $len:expr, $idx:expr, $vec_ty:ty, $store:ident, $load:ident) => { + let vals: [$elem_ty; $len] = crate::array::from_fn(|i| i as $elem_ty); + let a: $vec_ty = transmute(vals); + let mut tmp = [0 as $elem_ty; 4]; + $store::<$idx>(tmp.as_mut_ptr().cast(), a); + let r: $vec_ty = $load::<$idx>(tmp.as_ptr().cast(), a); + let out: [$elem_ty; $len] = transmute(r); + assert_eq!(out, vals); + }; + } + + macro_rules! lane_wide_store_load_roundtrip_neon { + ($( $name:ident $args:tt);* $(;)?) => { + $( + #[cfg_attr(miri, ignore)] + #[simd_test(enable = "neon")] + unsafe fn $name() { + lane_wide_store_load_roundtrip! $args; + } + )* + }; + } + + lane_wide_store_load_roundtrip_neon! 
{ + test_vld2q_lane_s8(i8, 32, 15, int8x16x2_t, vst2q_lane_s8, vld2q_lane_s8); + test_vld3q_lane_s8(i8, 48, 15, int8x16x3_t, vst3q_lane_s8, vld3q_lane_s8); + test_vld4q_lane_s8(i8, 64, 15, int8x16x4_t, vst4q_lane_s8, vld4q_lane_s8); + + test_vld2q_lane_u8(u8, 32, 15, uint8x16x2_t, vst2q_lane_u8, vld2q_lane_u8); + test_vld3q_lane_u8(u8, 48, 15, uint8x16x3_t, vst3q_lane_u8, vld3q_lane_u8); + test_vld4q_lane_u8(u8, 64, 15, uint8x16x4_t, vst4q_lane_u8, vld4q_lane_u8); + + test_vld2_lane_s64(i64, 2, 0, int64x1x2_t, vst2_lane_s64, vld2_lane_s64); + test_vld3_lane_s64(i64, 3, 0, int64x1x3_t, vst3_lane_s64, vld3_lane_s64); + test_vld4_lane_s64(i64, 4, 0, int64x1x4_t, vst4_lane_s64, vld4_lane_s64); + test_vld2q_lane_s64(i64, 4, 1, int64x2x2_t, vst2q_lane_s64, vld2q_lane_s64); + test_vld3q_lane_s64(i64, 6, 1, int64x2x3_t, vst3q_lane_s64, vld3q_lane_s64); + test_vld4q_lane_s64(i64, 8, 1, int64x2x4_t, vst4q_lane_s64, vld4q_lane_s64); + + test_vld2_lane_u64(u64, 2, 0, uint64x1x2_t, vst2_lane_u64, vld2_lane_u64); + test_vld3_lane_u64(u64, 3, 0, uint64x1x3_t, vst3_lane_u64, vld3_lane_u64); + test_vld4_lane_u64(u64, 4, 0, uint64x1x4_t, vst4_lane_u64, vld4_lane_u64); + test_vld2q_lane_u64(u64, 4, 1, uint64x2x2_t, vst2q_lane_u64, vld2q_lane_u64); + test_vld3q_lane_u64(u64, 6, 1, uint64x2x3_t, vst3q_lane_u64, vld3q_lane_u64); + test_vld4q_lane_u64(u64, 8, 1, uint64x2x4_t, vst4q_lane_u64, vld4q_lane_u64); } } diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index cf4d10162ec9a..663cba151e22b 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -66192,7 +66192,7 @@ pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { - 
static_assert_uimm_bits!(LANE, 1); + static_assert_uimm_bits!(LANE, 3); unsafe extern "unadjusted" { #[cfg_attr(target_arch = "arm", link_name = "llvm.arm.neon.vst2lane.p0.v8f16")] fn _vst2q_lane_f16(ptr: *mut i8, a: float16x8_t, b: float16x8_t, n: i32, size: i32); diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/load_tests.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/load_tests.rs index cc821b4af2025..70a37f7c05dad 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/load_tests.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/load_tests.rs @@ -190,6 +190,24 @@ fn test_vld1q_p64() { assert_eq!(r, e) } +#[cfg(not(target_arch = "arm64ec"))] +#[simd_test(enable = "neon,fp16")] +fn test_vld1_f16() { + let a: [f16; 5] = [0., 1., 2., 3., 4.]; + let e = f16x4::new(1., 2., 3., 4.); + let r = unsafe { f16x4::from(vld1_f16(a[1..].as_ptr())) }; + assert_eq!(r, e) +} + +#[cfg(not(target_arch = "arm64ec"))] +#[simd_test(enable = "neon,fp16")] +fn test_vld1q_f16() { + let a: [f16; 9] = [0., 1., 2., 3., 4., 5., 6., 7., 8.]; + let e = f16x8::new(1., 2., 3., 4., 5., 6., 7., 8.); + let r = unsafe { f16x8::from(vld1q_f16(a[1..].as_ptr())) }; + assert_eq!(r, e) +} + #[simd_test(enable = "neon")] fn test_vld1_f32() { let a: [f32; 3] = [0., 1., 2.]; diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs index 8a4a6e9228221..ed65de2b89460 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/mod.rs @@ -5793,8 +5793,7 @@ mod tests { #[cfg(not(target_arch = "arm64ec"))] mod fp16 { use super::*; - #[cfg_attr(target_arch = "arm", simd_test(enable = "neon,fp16"))] - #[cfg_attr(not(target_arch = "arm"), simd_test(enable = "neon"))] + #[simd_test(enable = "neon,fp16")] fn test_vcombine_f16() { let a = f16x4::from_array([3_f16, 4., 5., 6.]); let b = f16x4::from_array([13_f16, 14., 15., 
16.]); @@ -5802,6 +5801,40 @@ mod tests { let c = f16x8::from(vcombine_f16(a.into(), b.into())); assert_eq!(c, e); } + + #[simd_test(enable = "neon,fp16")] + fn test_vld1_lane_f16() { + let a = f16x4::new(0., 1., 2., 3.); + let elem: f16 = 42.; + let e = f16x4::new(0., 1., 2., 42.); + let r = unsafe { f16x4::from(vld1_lane_f16::<3>(&elem, a.into())) }; + assert_eq!(r, e) + } + + #[simd_test(enable = "neon,fp16")] + fn test_vld1q_lane_f16() { + let a = f16x8::new(0., 1., 2., 3., 4., 5., 6., 7.); + let elem: f16 = 42.; + let e = f16x8::new(0., 1., 2., 3., 4., 5., 6., 42.); + let r = unsafe { f16x8::from(vld1q_lane_f16::<7>(&elem, a.into())) }; + assert_eq!(r, e) + } + + #[simd_test(enable = "neon,fp16")] + fn test_vld1_dup_f16() { + let elem: f16 = 42.; + let e = f16x4::new(42., 42., 42., 42.); + let r = unsafe { f16x4::from(vld1_dup_f16(&elem)) }; + assert_eq!(r, e) + } + + #[simd_test(enable = "neon,fp16")] + fn test_vld1q_dup_f16() { + let elem: f16 = 42.; + let e = f16x8::new(42., 42., 42., 42., 42., 42., 42., 42.); + let r = unsafe { f16x8::from(vld1q_dup_f16(&elem)) }; + assert_eq!(r, e) + } } test_vcombine!(test_vcombine_s32 => vcombine_s32([3_i32, -4], [13_i32, -14])); @@ -5814,6 +5847,98 @@ mod tests { test_vcombine!(test_vcombine_p64 => vcombine_p64([3_u64], [13_u64])); #[cfg(any(target_arch = "aarch64", target_arch = "arm64ec"))] test_vcombine!(test_vcombine_f64 => vcombine_f64([-3_f64], [13_f64])); + + macro_rules! lane_wide_store_load_roundtrip { + ($elem_ty:ty, $len:expr, $idx:expr, $vec_ty:ty, $store:ident, $load:ident) => { + let vals: [$elem_ty; $len] = crate::array::from_fn(|i| i as $elem_ty); + let a: $vec_ty = transmute(vals); + let mut tmp = [0 as $elem_ty; 4]; + $store::<$idx>(tmp.as_mut_ptr().cast(), a); + let r: $vec_ty = $load::<$idx>(tmp.as_ptr().cast(), a); + let out: [$elem_ty; $len] = transmute(r); + assert_eq!(out, vals); + }; + } + + // Most of these are implemented with builtins, which miri can't handle + macro_rules! 
lane_wide_store_load_roundtrip_neon { + ($( $name:ident $args:tt);* $(;)?) => { + $( + #[cfg_attr(miri, ignore)] + #[simd_test(enable = "neon")] + unsafe fn $name() { + lane_wide_store_load_roundtrip! $args; + } + )* + }; + } + + macro_rules! lane_wide_store_load_roundtrip_fp16 { + ($( $name:ident $args:tt);* $(;)?) => { + $( + #[cfg_attr(miri, ignore)] + #[simd_test(enable = "neon,fp16")] + #[cfg(not(target_arch = "arm64ec"))] + unsafe fn $name() { + lane_wide_store_load_roundtrip! $args; + } + )* + }; + } + + lane_wide_store_load_roundtrip_neon! { + test_vld2_lane_s8(i8, 16, 7, int8x8x2_t, vst2_lane_s8, vld2_lane_s8); + test_vld3_lane_s8(i8, 24, 7, int8x8x3_t, vst3_lane_s8, vld3_lane_s8); + test_vld4_lane_s8(i8, 32, 7, int8x8x4_t, vst4_lane_s8, vld4_lane_s8); + + test_vld2_lane_u8(u8, 16, 7, uint8x8x2_t, vst2_lane_u8, vld2_lane_u8); + test_vld3_lane_u8(u8, 24, 7, uint8x8x3_t, vst3_lane_u8, vld3_lane_u8); + test_vld4_lane_u8(u8, 32, 7, uint8x8x4_t, vst4_lane_u8, vld4_lane_u8); + + test_vld2_lane_s16(i16, 8, 3, int16x4x2_t, vst2_lane_s16, vld2_lane_s16); + test_vld3_lane_s16(i16, 12, 3, int16x4x3_t, vst3_lane_s16, vld3_lane_s16); + test_vld4_lane_s16(i16, 16, 3, int16x4x4_t, vst4_lane_s16, vld4_lane_s16); + test_vld2q_lane_s16(i16, 16, 7, int16x8x2_t, vst2q_lane_s16, vld2q_lane_s16); + test_vld3q_lane_s16(i16, 24, 7, int16x8x3_t, vst3q_lane_s16, vld3q_lane_s16); + test_vld4q_lane_s16(i16, 32, 7, int16x8x4_t, vst4q_lane_s16, vld4q_lane_s16); + + test_vld2_lane_u16(u16, 8, 3, uint16x4x2_t, vst2_lane_u16, vld2_lane_u16); + test_vld3_lane_u16(u16, 12, 3, uint16x4x3_t, vst3_lane_u16, vld3_lane_u16); + test_vld4_lane_u16(u16, 16, 3, uint16x4x4_t, vst4_lane_u16, vld4_lane_u16); + test_vld2q_lane_u16(u16, 16, 7, uint16x8x2_t, vst2q_lane_u16, vld2q_lane_u16); + test_vld3q_lane_u16(u16, 24, 7, uint16x8x3_t, vst3q_lane_u16, vld3q_lane_u16); + test_vld4q_lane_u16(u16, 32, 7, uint16x8x4_t, vst4q_lane_u16, vld4q_lane_u16); + + test_vld2_lane_s32(i32, 4, 1, int32x2x2_t, 
vst2_lane_s32, vld2_lane_s32); + test_vld3_lane_s32(i32, 6, 1, int32x2x3_t, vst3_lane_s32, vld3_lane_s32); + test_vld4_lane_s32(i32, 8, 1, int32x2x4_t, vst4_lane_s32, vld4_lane_s32); + test_vld2q_lane_s32(i32, 8, 3, int32x4x2_t, vst2q_lane_s32, vld2q_lane_s32); + test_vld3q_lane_s32(i32, 12, 3, int32x4x3_t, vst3q_lane_s32, vld3q_lane_s32); + test_vld4q_lane_s32(i32, 16, 3, int32x4x4_t, vst4q_lane_s32, vld4q_lane_s32); + + test_vld2_lane_u32(u32, 4, 1, uint32x2x2_t, vst2_lane_u32, vld2_lane_u32); + test_vld3_lane_u32(u32, 6, 1, uint32x2x3_t, vst3_lane_u32, vld3_lane_u32); + test_vld4_lane_u32(u32, 8, 1, uint32x2x4_t, vst4_lane_u32, vld4_lane_u32); + test_vld2q_lane_u32(u32, 8, 3, uint32x4x2_t, vst2q_lane_u32, vld2q_lane_u32); + test_vld3q_lane_u32(u32, 12, 3, uint32x4x3_t, vst3q_lane_u32, vld3q_lane_u32); + test_vld4q_lane_u32(u32, 16, 3, uint32x4x4_t, vst4q_lane_u32, vld4q_lane_u32); + + test_vld2_lane_f32(f32, 4, 1, float32x2x2_t, vst2_lane_f32, vld2_lane_f32); + test_vld3_lane_f32(f32, 6, 1, float32x2x3_t, vst3_lane_f32, vld3_lane_f32); + test_vld4_lane_f32(f32, 8, 1, float32x2x4_t, vst4_lane_f32, vld4_lane_f32); + test_vld2q_lane_f32(f32, 8, 3, float32x4x2_t, vst2q_lane_f32, vld2q_lane_f32); + test_vld3q_lane_f32(f32, 12, 3, float32x4x3_t, vst3q_lane_f32, vld3q_lane_f32); + test_vld4q_lane_f32(f32, 16, 3, float32x4x4_t, vst4q_lane_f32, vld4q_lane_f32); + } + + lane_wide_store_load_roundtrip_fp16! 
{ + test_vld2_lane_f16(f16, 8, 3, float16x4x2_t, vst2_lane_f16, vld2_lane_f16); + test_vld3_lane_f16(f16, 12, 3, float16x4x3_t, vst3_lane_f16, vld3_lane_f16); + test_vld4_lane_f16(f16, 16, 3, float16x4x4_t, vst4_lane_f16, vld4_lane_f16); + test_vld2q_lane_f16(f16, 16, 7, float16x8x2_t, vst2q_lane_f16, vld2q_lane_f16); + test_vld3q_lane_f16(f16, 24, 7, float16x8x3_t, vst3q_lane_f16, vld3q_lane_f16); + test_vld4q_lane_f16(f16, 32, 7, float16x8x4_t, vst4q_lane_f16, vld4q_lane_f16); + } } #[cfg(all(test, target_arch = "arm"))] diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/store_tests.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/store_tests.rs index 2b10b38f2dd8f..6eb60e4c78bc8 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/store_tests.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/store_tests.rs @@ -406,6 +406,44 @@ fn test_vst1q_p64() { assert_eq!(vals[2], 2); } +#[cfg(not(target_arch = "arm64ec"))] +#[simd_test(enable = "neon,fp16")] +fn test_vst1_f16() { + let mut vals = [0_f16; 5]; + let a = f16x4::new(1., 2., 3., 4.); + + unsafe { + vst1_f16(vals[1..].as_mut_ptr(), a.into()); + } + + assert_eq!(vals[0], 0.); + assert_eq!(vals[1], 1.); + assert_eq!(vals[2], 2.); + assert_eq!(vals[3], 3.); + assert_eq!(vals[4], 4.); +} + +#[cfg(not(target_arch = "arm64ec"))] +#[simd_test(enable = "neon,fp16")] +fn test_vst1q_f16() { + let mut vals = [0_f16; 9]; + let a = f16x8::new(1., 2., 3., 4., 5., 6., 7., 8.); + + unsafe { + vst1q_f16(vals[1..].as_mut_ptr(), a.into()); + } + + assert_eq!(vals[0], 0.); + assert_eq!(vals[1], 1.); + assert_eq!(vals[2], 2.); + assert_eq!(vals[3], 3.); + assert_eq!(vals[4], 4.); + assert_eq!(vals[5], 5.); + assert_eq!(vals[6], 6.); + assert_eq!(vals[7], 7.); + assert_eq!(vals[8], 8.); +} + #[simd_test(enable = "neon")] fn test_vst1_f32() { let mut vals = [0_f32; 3]; diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml 
b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index a769d352649c9..990fd7d4986ac 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -3822,7 +3822,7 @@ intrinsics: safety: unsafe: [neon] types: - - ['*const i8', int8x16x3_t, int8x16_t, i8, '3'] + - ['*const i8', int8x16x3_t, int8x16_t, i8, '4'] - ['*const i64', int64x2x3_t, int64x2_t, i64, '1'] compose: - FnCall: [static_assert_uimm_bits!, [LANE, '{type[4]}']] @@ -4246,7 +4246,7 @@ intrinsics: safety: unsafe: [neon] types: - - ['*const i8', int8x16x4_t, int8x16_t, i8, '3'] + - ['*const i8', int8x16x4_t, int8x16_t, i8, '4'] - ['*const i64', int64x2x4_t, int64x2_t, i64, '1'] - ['*const f64', float64x2x4_t, float64x2_t, f64, '1'] compose: diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index f6ef7f17d73b7..2d29d95f0a31d 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -5368,7 +5368,7 @@ intrinsics: unsafe: [neon] types: - [f16, float16x4x2_t, '2', float16x4_t, '2'] - - [f16, float16x8x2_t, '1', float16x8_t, '2'] + - [f16, float16x8x2_t, '3', float16x8_t, '2'] compose: - FnCall: [static_assert_uimm_bits!, [LANE, "{type[2]}"]] - LLVMLink: From 5f481c226e75c271d178f1a851f9243deaab793b Mon Sep 17 00:00:00 2001 From: Adam Gemmell Date: Wed, 15 Apr 2026 16:52:13 +0100 Subject: [PATCH 10/30] Generate some svldff1 tests now that the qemu bug has been fixed --- .../src/aarch64/sve/ld_st_tests_aarch64.rs | 2348 +++++++++++++++-- .../stdarch-gen-arm/src/load_store_tests.rs | 7 - 2 files changed, 2119 insertions(+), 236 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs index 
973b7e9fa35a0..3007ba4ee6597 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/ld_st_tests_aarch64.rs @@ -7067,304 +7067,606 @@ unsafe fn test_svldff1_u64() { ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1_vnum_f32() { +unsafe fn test_svldff1_gather_s32index_f32() { + let indices = svindex_s32(0, 1); svsetffr(); - let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); - let len = svcntw() as usize; + let _ = svld1_gather_s32index_f32(svptrue_b32(), F32_DATA.as_ptr(), indices); + let loaded = svldff1_gather_s32index_f32(svptrue_b32(), F32_DATA.as_ptr(), indices); assert_vector_matches_f32( loaded, svcvt_f32_s32_x( svptrue_b32(), - svindex_s32( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1_vnum_f64() { +unsafe fn test_svldff1_gather_s32index_s32() { + let indices = svindex_s32(0, 1); svsetffr(); - let _ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); - let len = svcntd() as usize; + let _ = svld1_gather_s32index_s32(svptrue_b32(), I32_DATA.as_ptr(), indices); + let loaded = svldff1_gather_s32index_s32(svptrue_b32(), I32_DATA.as_ptr(), indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_s32index_u32() { + let indices = svindex_s32(0, 1); + svsetffr(); + let _ = svld1_gather_s32index_u32(svptrue_b32(), U32_DATA.as_ptr(), indices); + let loaded = svldff1_gather_s32index_u32(svptrue_b32(), U32_DATA.as_ptr(), indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), 
+ ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_s64index_f64() { + let indices = svindex_s64(0, 1); + svsetffr(); + let _ = svld1_gather_s64index_f64(svptrue_b64(), F64_DATA.as_ptr(), indices); + let loaded = svldff1_gather_s64index_f64(svptrue_b64(), F64_DATA.as_ptr(), indices); assert_vector_matches_f64( loaded, svcvt_f64_s64_x( svptrue_b64(), - svindex_s64( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1_vnum_s8() { +unsafe fn test_svldff1_gather_s64index_s64() { + let indices = svindex_s64(0, 1); svsetffr(); - let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); - let len = svcntb() as usize; - assert_vector_matches_i8( + let _ = svld1_gather_s64index_s64(svptrue_b64(), I64_DATA.as_ptr(), indices); + let loaded = svldff1_gather_s64index_s64(svptrue_b64(), I64_DATA.as_ptr(), indices); + assert_vector_matches_i64( loaded, - svindex_s8( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1_vnum_s16() { +unsafe fn test_svldff1_gather_s64index_u64() { + let indices = svindex_s64(0, 1); svsetffr(); - let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); - let len = svcnth() as usize; - assert_vector_matches_i16( + let _ = svld1_gather_s64index_u64(svptrue_b64(), U64_DATA.as_ptr(), indices); + let loaded = svldff1_gather_s64index_u64(svptrue_b64(), U64_DATA.as_ptr(), indices); + assert_vector_matches_u64( loaded, - svindex_s16( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = 
"sve")] -unsafe fn test_svldff1_vnum_s32() { +unsafe fn test_svldff1_gather_u32index_f32() { + let indices = svindex_u32(0, 1); svsetffr(); - let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); - let len = svcntw() as usize; - assert_vector_matches_i32( + let _ = svld1_gather_u32index_f32(svptrue_b32(), F32_DATA.as_ptr(), indices); + let loaded = svldff1_gather_u32index_f32(svptrue_b32(), F32_DATA.as_ptr(), indices); + assert_vector_matches_f32( loaded, - svindex_s32( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1_vnum_s64() { +unsafe fn test_svldff1_gather_u32index_s32() { + let indices = svindex_u32(0, 1); svsetffr(); - let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); - let len = svcntd() as usize; - assert_vector_matches_i64( + let _ = svld1_gather_u32index_s32(svptrue_b32(), I32_DATA.as_ptr(), indices); + let loaded = svldff1_gather_u32index_s32(svptrue_b32(), I32_DATA.as_ptr(), indices); + assert_vector_matches_i32( loaded, - svindex_s64( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1_vnum_u8() { +unsafe fn test_svldff1_gather_u32index_u32() { + let indices = svindex_u32(0, 1); svsetffr(); - let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); - let len = svcntb() as usize; - assert_vector_matches_u8( + let _ = svld1_gather_u32index_u32(svptrue_b32(), U32_DATA.as_ptr(), indices); + let loaded = svldff1_gather_u32index_u32(svptrue_b32(), U32_DATA.as_ptr(), indices); + 
assert_vector_matches_u32( loaded, - svindex_u8( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1_vnum_u16() { +unsafe fn test_svldff1_gather_u64index_f64() { + let indices = svindex_u64(0, 1); svsetffr(); - let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); - let len = svcnth() as usize; - assert_vector_matches_u16( + let _ = svld1_gather_u64index_f64(svptrue_b64(), F64_DATA.as_ptr(), indices); + let loaded = svldff1_gather_u64index_f64(svptrue_b64(), F64_DATA.as_ptr(), indices); + assert_vector_matches_f64( loaded, - svindex_u16( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1_vnum_u32() { +unsafe fn test_svldff1_gather_u64index_s64() { + let indices = svindex_u64(0, 1); svsetffr(); - let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); - let len = svcntw() as usize; - assert_vector_matches_u32( + let _ = svld1_gather_u64index_s64(svptrue_b64(), I64_DATA.as_ptr(), indices); + let loaded = svldff1_gather_u64index_s64(svptrue_b64(), I64_DATA.as_ptr(), indices); + assert_vector_matches_i64( loaded, - svindex_u32( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1_vnum_u64() { +unsafe fn test_svldff1_gather_u64index_u64() { + let indices = svindex_u64(0, 1); svsetffr(); - let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); - let loaded = svldff1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); - let len = svcntd() as 
usize; + let _ = svld1_gather_u64index_u64(svptrue_b64(), U64_DATA.as_ptr(), indices); + let loaded = svldff1_gather_u64index_u64(svptrue_b64(), U64_DATA.as_ptr(), indices); assert_vector_matches_u64( loaded, - svindex_u64( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_s16() { +unsafe fn test_svldff1_gather_s32offset_f32() { + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); svsetffr(); - let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); - let loaded = svldff1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); - assert_vector_matches_i16( + let _ = svld1_gather_s32offset_f32(svptrue_b32(), F32_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_s32offset_f32(svptrue_b32(), F32_DATA.as_ptr(), offsets); + assert_vector_matches_f32( loaded, - svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_s32() { +unsafe fn test_svldff1_gather_s32offset_s32() { + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); svsetffr(); - let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); - let loaded = svldff1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + let _ = svld1_gather_s32offset_s32(svptrue_b32(), I32_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_s32offset_s32(svptrue_b32(), I32_DATA.as_ptr(), offsets); assert_vector_matches_i32( loaded, svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sh_s32() { +unsafe fn test_svldff1_gather_s32offset_u32() { + let offsets = svindex_s32(0, 4u32.try_into().unwrap()); svsetffr(); - let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); - let loaded = svldff1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); - 
assert_vector_matches_i32( + let _ = svld1_gather_s32offset_u32(svptrue_b32(), U32_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_s32offset_u32(svptrue_b32(), U32_DATA.as_ptr(), offsets); + assert_vector_matches_u32( loaded, - svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_s64() { +unsafe fn test_svldff1_gather_s64offset_f64() { + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); svsetffr(); - let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); - let loaded = svldff1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); - assert_vector_matches_i64( + let _ = svld1_gather_s64offset_f64(svptrue_b64(), F64_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_s64offset_f64(svptrue_b64(), F64_DATA.as_ptr(), offsets); + assert_vector_matches_f64( loaded, - svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sh_s64() { +unsafe fn test_svldff1_gather_s64offset_s64() { + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); svsetffr(); - let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); - let loaded = svldff1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + let _ = svld1_gather_s64offset_s64(svptrue_b64(), I64_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_s64offset_s64(svptrue_b64(), I64_DATA.as_ptr(), offsets); assert_vector_matches_i64( loaded, svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sw_s64() { +unsafe fn test_svldff1_gather_s64offset_u64() { + let offsets = svindex_s64(0, 8u32.try_into().unwrap()); svsetffr(); - let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); - let loaded = svldff1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); - 
assert_vector_matches_i64( + let _ = svld1_gather_s64offset_u64(svptrue_b64(), U64_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_s64offset_u64(svptrue_b64(), U64_DATA.as_ptr(), offsets); + assert_vector_matches_u64( loaded, - svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_u16() { +unsafe fn test_svldff1_gather_u32offset_f32() { + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); svsetffr(); - let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); - let loaded = svldff1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); - assert_vector_matches_u16( + let _ = svld1_gather_u32offset_f32(svptrue_b32(), F32_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_u32offset_f32(svptrue_b32(), F32_DATA.as_ptr(), offsets); + assert_vector_matches_f32( loaded, - svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_u32() { +unsafe fn test_svldff1_gather_u32offset_s32() { + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); svsetffr(); - let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); - let loaded = svldff1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); - assert_vector_matches_u32( + let _ = svld1_gather_u32offset_s32(svptrue_b32(), I32_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_u32offset_s32(svptrue_b32(), I32_DATA.as_ptr(), offsets); + assert_vector_matches_i32( loaded, - svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sh_u32() { +unsafe fn test_svldff1_gather_u32offset_u32() { + let offsets = svindex_u32(0, 4u32.try_into().unwrap()); svsetffr(); - let _ = 
svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); - let loaded = svldff1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + let _ = svld1_gather_u32offset_u32(svptrue_b32(), U32_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_u32offset_u32(svptrue_b32(), U32_DATA.as_ptr(), offsets); assert_vector_matches_u32( loaded, svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_u64() { +unsafe fn test_svldff1_gather_u64offset_f64() { + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); svsetffr(); - let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); - let loaded = svldff1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); - assert_vector_matches_u64( + let _ = svld1_gather_u64offset_f64(svptrue_b64(), F64_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_u64offset_f64(svptrue_b64(), F64_DATA.as_ptr(), offsets); + assert_vector_matches_f64( loaded, - svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sh_u64() { +unsafe fn test_svldff1_gather_u64offset_s64() { + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); svsetffr(); - let _ = svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); - let loaded = svldff1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); - assert_vector_matches_u64( + let _ = svld1_gather_u64offset_s64(svptrue_b64(), I64_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_u64offset_s64(svptrue_b64(), I64_DATA.as_ptr(), offsets); + assert_vector_matches_i64( loaded, - svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sw_u64() { +unsafe fn test_svldff1_gather_u64offset_u64() { + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); svsetffr(); - let _ = 
svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); - let loaded = svldff1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + let _ = svld1_gather_u64offset_u64(svptrue_b64(), U64_DATA.as_ptr(), offsets); + let loaded = svldff1_gather_u64offset_u64(svptrue_b64(), U64_DATA.as_ptr(), offsets); assert_vector_matches_u64( loaded, svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_vnum_s16() { +unsafe fn test_svldff1_gather_u64base_f64() { + let bases = svdup_n_u64(F64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); svsetffr(); - let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); - let loaded = svldff1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); - let len = svcnth() as usize; - assert_vector_matches_i16( + let _ = svld1_gather_u64base_f64(svptrue_b64(), bases); + let loaded = svldff1_gather_u64base_f64(svptrue_b64(), bases); + assert_vector_matches_f64( loaded, - svindex_s16( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_vnum_s32() { +unsafe fn test_svldff1_gather_u64base_s64() { + let bases = svdup_n_u64(I64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); svsetffr(); - let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); - let loaded = svldff1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); - let len = svcntw() as usize; - assert_vector_matches_i32( + let _ = svld1_gather_u64base_s64(svptrue_b64(), bases); + let loaded = svldff1_gather_u64base_s64(svptrue_b64(), bases); + assert_vector_matches_i64( loaded, - svindex_s32( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + 
svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sh_vnum_s32() { +unsafe fn test_svldff1_gather_u64base_u64() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); svsetffr(); - let _ = svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); - let loaded = svldff1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let _ = svld1_gather_u64base_u64(svptrue_b64(), bases); + let loaded = svldff1_gather_u64base_u64(svptrue_b64(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u32base_index_f32() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1_gather_u32base_index_f32( + svptrue_b32(), + bases, + F32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); + let loaded = svldff1_gather_u32base_index_f32( + svptrue_b32(), + bases, + F32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u32base_index_s32() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1_gather_u32base_index_s32( + svptrue_b32(), + bases, + I32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); + let loaded = svldff1_gather_u32base_index_s32( + svptrue_b32(), + bases, + I32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u32base_index_u32() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = 
svld1_gather_u32base_index_u32( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); + let loaded = svldff1_gather_u32base_index_u32( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 / (4u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u64base_index_f64() { + let bases = svdup_n_u64(F64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let _ = svld1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap()); + let loaded = svldff1_gather_u64base_index_f64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u64base_index_s64() { + let bases = svdup_n_u64(I64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let _ = svld1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap()); + let loaded = svldff1_gather_u64base_index_s64(svptrue_b64(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u64base_index_u64() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let _ = svld1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap()); + let loaded = svldff1_gather_u64base_index_u64(svptrue_b64(), bases, 1.try_into().unwrap()); + 
assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u32base_offset_f32() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1_gather_u32base_offset_f32( + svptrue_b32(), + bases, + F32_DATA.as_ptr() as i64 + 4u32 as i64, + ); + let loaded = svldff1_gather_u32base_offset_f32( + svptrue_b32(), + bases, + F32_DATA.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u32base_offset_s32() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1_gather_u32base_offset_s32( + svptrue_b32(), + bases, + I32_DATA.as_ptr() as i64 + 4u32 as i64, + ); + let loaded = svldff1_gather_u32base_offset_s32( + svptrue_b32(), + bases, + I32_DATA.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u32base_offset_u32() { + let bases = svindex_u32(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1_gather_u32base_offset_u32( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 + 4u32 as i64, + ); + let loaded = svldff1_gather_u32base_offset_u32( + svptrue_b32(), + bases, + U32_DATA.as_ptr() as i64 + 4u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u64base_offset_f64() { + let bases = svdup_n_u64(F64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let _ = 
svld1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + let loaded = svldff1_gather_u64base_offset_f64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u64base_offset_s64() { + let bases = svdup_n_u64(I64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let _ = svld1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + let loaded = svldff1_gather_u64base_offset_s64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_gather_u64base_offset_u64() { + let bases = svdup_n_u64(U64_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 8u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b64(), bases, offsets); + svsetffr(); + let _ = svld1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + let loaded = svldff1_gather_u64base_offset_u64(svptrue_b64(), bases, 8u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_f32() { + svsetffr(); + let _ = svld1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_f32(svptrue_b32(), F32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_f32( + loaded, + svcvt_f32_s32_x( + svptrue_b32(), + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_f64() { + svsetffr(); + let 
_ = svld1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_f64(svptrue_b64(), F64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_f64( + loaded, + svcvt_f64_s64_x( + svptrue_b64(), + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s8() { + svsetffr(); + let _ = svld1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s8(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_i8( + loaded, + svindex_s8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s16() { + svsetffr(); + let _ = svld1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s16(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_s32() { + svsetffr(); + let _ = svld1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s32(svptrue_b32(), I32_DATA.as_ptr(), 1); let len = svcntw() as usize; assert_vector_matches_i32( loaded, @@ -7375,10 +7677,10 @@ unsafe fn test_svldff1sh_vnum_s32() { ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_vnum_s64() { +unsafe fn test_svldff1_vnum_s64() { svsetffr(); - let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); - let loaded = svldff1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let _ = svld1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_s64(svptrue_b64(), I64_DATA.as_ptr(), 1); let len = svcntd() as usize; assert_vector_matches_i64( loaded, @@ -7389,115 +7691,1481 @@ unsafe fn test_svldff1sb_vnum_s64() { ); } #[simd_test(enable = "sve")] -unsafe fn 
test_svldff1sh_vnum_s64() { +unsafe fn test_svldff1_vnum_u8() { svsetffr(); - let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); - let loaded = svldff1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); - let len = svcntd() as usize; - assert_vector_matches_i64( + let _ = svld1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u8(svptrue_b8(), U8_DATA.as_ptr(), 1); + let len = svcntb() as usize; + assert_vector_matches_u8( + loaded, + svindex_u8( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u16() { + svsetffr(); + let _ = svld1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u16(svptrue_b16(), U16_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u32() { + svsetffr(); + let _ = svld1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u32(svptrue_b32(), U32_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1_vnum_u64() { + svsetffr(); + let _ = svld1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let loaded = svldff1_vnum_u64(svptrue_b64(), U64_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_s32offset_s32() { + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_s32offset_s32(svptrue_b8(), I8_DATA.as_ptr(), offsets); + let loaded = svldff1sb_gather_s32offset_s32(svptrue_b8(), 
I8_DATA.as_ptr(), offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_s32offset_s32() { + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_s32offset_s32(svptrue_b16(), I16_DATA.as_ptr(), offsets); + let loaded = svldff1sh_gather_s32offset_s32(svptrue_b16(), I16_DATA.as_ptr(), offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_s32offset_u32() { + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_s32offset_u32(svptrue_b8(), I8_DATA.as_ptr(), offsets); + let loaded = svldff1sb_gather_s32offset_u32(svptrue_b8(), I8_DATA.as_ptr(), offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_s32offset_u32() { + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_s32offset_u32(svptrue_b16(), I16_DATA.as_ptr(), offsets); + let loaded = svldff1sh_gather_s32offset_u32(svptrue_b16(), I16_DATA.as_ptr(), offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_s64offset_s64() { + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_s64offset_s64(svptrue_b8(), I8_DATA.as_ptr(), offsets); + let loaded = svldff1sb_gather_s64offset_s64(svptrue_b8(), I8_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svldff1sh_gather_s64offset_s64() { + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_s64offset_s64(svptrue_b16(), I16_DATA.as_ptr(), offsets); + let loaded = svldff1sh_gather_s64offset_s64(svptrue_b16(), I16_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_s64offset_s64() { + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sw_gather_s64offset_s64(svptrue_b32(), I32_DATA.as_ptr(), offsets); + let loaded = svldff1sw_gather_s64offset_s64(svptrue_b32(), I32_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_s64offset_u64() { + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_s64offset_u64(svptrue_b8(), I8_DATA.as_ptr(), offsets); + let loaded = svldff1sb_gather_s64offset_u64(svptrue_b8(), I8_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_s64offset_u64() { + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_s64offset_u64(svptrue_b16(), I16_DATA.as_ptr(), offsets); + let loaded = svldff1sh_gather_s64offset_u64(svptrue_b16(), I16_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_s64offset_u64() { + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sw_gather_s64offset_u64(svptrue_b32(), I32_DATA.as_ptr(), offsets); + let loaded = 
svldff1sw_gather_s64offset_u64(svptrue_b32(), I32_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_u32offset_s32() { + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_u32offset_s32(svptrue_b8(), I8_DATA.as_ptr(), offsets); + let loaded = svldff1sb_gather_u32offset_s32(svptrue_b8(), I8_DATA.as_ptr(), offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u32offset_s32() { + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_u32offset_s32(svptrue_b16(), I16_DATA.as_ptr(), offsets); + let loaded = svldff1sh_gather_u32offset_s32(svptrue_b16(), I16_DATA.as_ptr(), offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_u32offset_u32() { + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_u32offset_u32(svptrue_b8(), I8_DATA.as_ptr(), offsets); + let loaded = svldff1sb_gather_u32offset_u32(svptrue_b8(), I8_DATA.as_ptr(), offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u32offset_u32() { + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_u32offset_u32(svptrue_b16(), I16_DATA.as_ptr(), offsets); + let loaded = svldff1sh_gather_u32offset_u32(svptrue_b16(), I16_DATA.as_ptr(), offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = 
"sve")] +unsafe fn test_svldff1sb_gather_u64offset_s64() { + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_u64offset_s64(svptrue_b8(), I8_DATA.as_ptr(), offsets); + let loaded = svldff1sb_gather_u64offset_s64(svptrue_b8(), I8_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64offset_s64() { + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_u64offset_s64(svptrue_b16(), I16_DATA.as_ptr(), offsets); + let loaded = svldff1sh_gather_u64offset_s64(svptrue_b16(), I16_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64offset_s64() { + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sw_gather_u64offset_s64(svptrue_b32(), I32_DATA.as_ptr(), offsets); + let loaded = svldff1sw_gather_u64offset_s64(svptrue_b32(), I32_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_u64offset_u64() { + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_u64offset_u64(svptrue_b8(), I8_DATA.as_ptr(), offsets); + let loaded = svldff1sb_gather_u64offset_u64(svptrue_b8(), I8_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64offset_u64() { + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_u64offset_u64(svptrue_b16(), I16_DATA.as_ptr(), offsets); + let 
loaded = svldff1sh_gather_u64offset_u64(svptrue_b16(), I16_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64offset_u64() { + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sw_gather_u64offset_u64(svptrue_b32(), I32_DATA.as_ptr(), offsets); + let loaded = svldff1sw_gather_u64offset_u64(svptrue_b32(), I32_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_u32base_offset_s32() { + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_u32base_offset_s32( + svptrue_b8(), + bases, + I8_DATA.as_ptr() as i64 + 1u32 as i64, + ); + let loaded = svldff1sb_gather_u32base_offset_s32( + svptrue_b8(), + bases, + I8_DATA.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u32base_offset_s32() { + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + I16_DATA.as_ptr() as i64 + 2u32 as i64, + ); + let loaded = svldff1sh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + I16_DATA.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_u32base_offset_u32() { + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sb_gather_u32base_offset_u32( + svptrue_b8(), + bases, + I8_DATA.as_ptr() as i64 + 1u32 as i64, + ); + let loaded = 
svldff1sb_gather_u32base_offset_u32( + svptrue_b8(), + bases, + I8_DATA.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u32base_offset_u32() { + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + I16_DATA.as_ptr() as i64 + 2u32 as i64, + ); + let loaded = svldff1sh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + I16_DATA.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_u64base_offset_s64() { + let bases = svdup_n_u64(I8_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svsetffr(); + let _ = svld1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + let loaded = svldff1sb_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64base_offset_s64() { + let bases = svdup_n_u64(I16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svsetffr(); + let _ = svld1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + let loaded = + svldff1sh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64base_offset_s64() { + 
let bases = svdup_n_u64(I32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svsetffr(); + let _ = svld1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + let loaded = + svldff1sw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_u64base_offset_u64() { + let bases = svdup_n_u64(I8_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svsetffr(); + let _ = svld1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + let loaded = svldff1sb_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64base_offset_u64() { + let bases = svdup_n_u64(I16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svsetffr(); + let _ = svld1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + let loaded = + svldff1sh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64base_offset_u64() { + let bases = svdup_n_u64(I32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svsetffr(); + let _ = svld1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 
4u32.try_into().unwrap()); + let loaded = + svldff1sw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_u64base_s64() { + let bases = svdup_n_u64(I8_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svsetffr(); + let _ = svld1sb_gather_u64base_s64(svptrue_b8(), bases); + let loaded = svldff1sb_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64base_s64() { + let bases = svdup_n_u64(I16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svsetffr(); + let _ = svld1sh_gather_u64base_s64(svptrue_b16(), bases); + let loaded = svldff1sh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64base_s64() { + let bases = svdup_n_u64(I32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svsetffr(); + let _ = svld1sw_gather_u64base_s64(svptrue_b32(), bases); + let loaded = svldff1sw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_gather_u64base_u64() { + let bases = svdup_n_u64(I8_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = 
svadd_u64_x(svptrue_b8(), bases, offsets); + svsetffr(); + let _ = svld1sb_gather_u64base_u64(svptrue_b8(), bases); + let loaded = svldff1sb_gather_u64base_u64(svptrue_b8(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64base_u64() { + let bases = svdup_n_u64(I16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svsetffr(); + let _ = svld1sh_gather_u64base_u64(svptrue_b16(), bases); + let loaded = svldff1sh_gather_u64base_u64(svptrue_b16(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64base_u64() { + let bases = svdup_n_u64(I32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svsetffr(); + let _ = svld1sw_gather_u64base_u64(svptrue_b32(), bases); + let loaded = svldff1sw_gather_u64base_u64(svptrue_b32(), bases); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s16() { + svsetffr(); + let _ = svld1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i16( + loaded, + svindex_s16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s32() { + svsetffr(); + let _ = svld1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn 
test_svldff1sh_s32() { + svsetffr(); + let _ = svld1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_s32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_s64() { + svsetffr(); + let _ = svld1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_s64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_s64() { + svsetffr(); + let _ = svld1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_s64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_s64() { + svsetffr(); + let _ = svld1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1sw_s64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u16() { + svsetffr(); + let _ = svld1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u16(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u16( + loaded, + svindex_u16((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u32() { + svsetffr(); + let _ = svld1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u32(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_u32() { + svsetffr(); + let _ = svld1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + 
let loaded = svldff1sh_u32(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_u64() { + svsetffr(); + let _ = svld1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + let loaded = svldff1sb_u64(svptrue_b8(), I8_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_u64() { + svsetffr(); + let _ = svld1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + let loaded = svldff1sh_u64(svptrue_b16(), I16_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_u64() { + svsetffr(); + let _ = svld1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + let loaded = svldff1sw_u64(svptrue_b32(), I32_DATA.as_ptr()); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s16() { + svsetffr(); + let _ = svld1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_i16( + loaded, + svindex_s16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s32() { + svsetffr(); + let _ = svld1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_s32() { + svsetffr(); + let _ = 
svld1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_s32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_i32( + loaded, + svindex_s32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_s64() { + svsetffr(); + let _ = svld1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_s64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_s64() { + svsetffr(); + let _ = svld1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_s64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_vnum_s64() { + svsetffr(); + let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_i64( + loaded, + svindex_s64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u16() { + svsetffr(); + let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcnth() as usize; + assert_vector_matches_u16( + loaded, + svindex_u16( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u32() { + svsetffr(); + let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = 
svldff1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_u32() { + svsetffr(); + let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntw() as usize; + assert_vector_matches_u32( + loaded, + svindex_u32( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sb_vnum_u64() { + svsetffr(); + let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let loaded = svldff1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_vnum_u64() { + svsetffr(); + let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let loaded = svldff1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_vnum_u64() { + svsetffr(); + let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let loaded = svldff1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); + let len = svcntd() as usize; + assert_vector_matches_u64( + loaded, + svindex_u64( + (len + 0usize).try_into().unwrap(), + 1usize.try_into().unwrap(), + ), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_s32index_s32() { + let indices = svindex_s32(0, 1); + svsetffr(); + let _ = svld1sh_gather_s32index_s32(svptrue_b16(), I16_DATA.as_ptr(), indices); + let loaded = 
svldff1sh_gather_s32index_s32(svptrue_b16(), I16_DATA.as_ptr(), indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_s32index_u32() { + let indices = svindex_s32(0, 1); + svsetffr(); + let _ = svld1sh_gather_s32index_u32(svptrue_b16(), I16_DATA.as_ptr(), indices); + let loaded = svldff1sh_gather_s32index_u32(svptrue_b16(), I16_DATA.as_ptr(), indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_s64index_s64() { + let indices = svindex_s64(0, 1); + svsetffr(); + let _ = svld1sh_gather_s64index_s64(svptrue_b16(), I16_DATA.as_ptr(), indices); + let loaded = svldff1sh_gather_s64index_s64(svptrue_b16(), I16_DATA.as_ptr(), indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_s64index_s64() { + let indices = svindex_s64(0, 1); + svsetffr(); + let _ = svld1sw_gather_s64index_s64(svptrue_b32(), I32_DATA.as_ptr(), indices); + let loaded = svldff1sw_gather_s64index_s64(svptrue_b32(), I32_DATA.as_ptr(), indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_s64index_u64() { + let indices = svindex_s64(0, 1); + svsetffr(); + let _ = svld1sh_gather_s64index_u64(svptrue_b16(), I16_DATA.as_ptr(), indices); + let loaded = svldff1sh_gather_s64index_u64(svptrue_b16(), I16_DATA.as_ptr(), indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_s64index_u64() { + let indices = svindex_s64(0, 1); + 
svsetffr(); + let _ = svld1sw_gather_s64index_u64(svptrue_b32(), I32_DATA.as_ptr(), indices); + let loaded = svldff1sw_gather_s64index_u64(svptrue_b32(), I32_DATA.as_ptr(), indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u32index_s32() { + let indices = svindex_u32(0, 1); + svsetffr(); + let _ = svld1sh_gather_u32index_s32(svptrue_b16(), I16_DATA.as_ptr(), indices); + let loaded = svldff1sh_gather_u32index_s32(svptrue_b16(), I16_DATA.as_ptr(), indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u32index_u32() { + let indices = svindex_u32(0, 1); + svsetffr(); + let _ = svld1sh_gather_u32index_u32(svptrue_b16(), I16_DATA.as_ptr(), indices); + let loaded = svldff1sh_gather_u32index_u32(svptrue_b16(), I16_DATA.as_ptr(), indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64index_s64() { + let indices = svindex_u64(0, 1); + svsetffr(); + let _ = svld1sh_gather_u64index_s64(svptrue_b16(), I16_DATA.as_ptr(), indices); + let loaded = svldff1sh_gather_u64index_s64(svptrue_b16(), I16_DATA.as_ptr(), indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64index_s64() { + let indices = svindex_u64(0, 1); + svsetffr(); + let _ = svld1sw_gather_u64index_s64(svptrue_b32(), I32_DATA.as_ptr(), indices); + let loaded = svldff1sw_gather_u64index_s64(svptrue_b32(), I32_DATA.as_ptr(), indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} 
+#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64index_u64() { + let indices = svindex_u64(0, 1); + svsetffr(); + let _ = svld1sh_gather_u64index_u64(svptrue_b16(), I16_DATA.as_ptr(), indices); + let loaded = svldff1sh_gather_u64index_u64(svptrue_b16(), I16_DATA.as_ptr(), indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64index_u64() { + let indices = svindex_u64(0, 1); + svsetffr(); + let _ = svld1sw_gather_u64index_u64(svptrue_b32(), I32_DATA.as_ptr(), indices); + let loaded = svldff1sw_gather_u64index_u64(svptrue_b32(), I32_DATA.as_ptr(), indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u32base_index_s32() { + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_u32base_index_s32( + svptrue_b16(), + bases, + I16_DATA.as_ptr() as i64 / (2u32 as i64) + 1, + ); + let loaded = svldff1sh_gather_u32base_index_s32( + svptrue_b16(), + bases, + I16_DATA.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u32base_index_u32() { + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1sh_gather_u32base_index_u32( + svptrue_b16(), + bases, + I16_DATA.as_ptr() as i64 / (2u32 as i64) + 1, + ); + let loaded = svldff1sh_gather_u32base_index_u32( + svptrue_b16(), + bases, + I16_DATA.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64base_index_s64() { + let bases = 
svdup_n_u64(I16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svsetffr(); + let _ = svld1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + let loaded = svldff1sh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64base_index_s64() { + let bases = svdup_n_u64(I32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svsetffr(); + let _ = svld1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + let loaded = svldff1sw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sh_gather_u64base_index_u64() { + let bases = svdup_n_u64(I16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svsetffr(); + let _ = svld1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + let loaded = svldff1sh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1sw_gather_u64base_index_u64() { + let bases = svdup_n_u64(I32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svsetffr(); + let _ = svld1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + let loaded = 
svldff1sw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_s32offset_s32() { + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_s32offset_s32(svptrue_b8(), U8_DATA.as_ptr(), offsets); + let loaded = svldff1ub_gather_s32offset_s32(svptrue_b8(), U8_DATA.as_ptr(), offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_s32offset_s32() { + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_s32offset_s32(svptrue_b16(), U16_DATA.as_ptr(), offsets); + let loaded = svldff1uh_gather_s32offset_s32(svptrue_b16(), U16_DATA.as_ptr(), offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_s32offset_u32() { + let offsets = svindex_s32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_s32offset_u32(svptrue_b8(), U8_DATA.as_ptr(), offsets); + let loaded = svldff1ub_gather_s32offset_u32(svptrue_b8(), U8_DATA.as_ptr(), offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_s32offset_u32() { + let offsets = svindex_s32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_s32offset_u32(svptrue_b16(), U16_DATA.as_ptr(), offsets); + let loaded = svldff1uh_gather_s32offset_u32(svptrue_b16(), U16_DATA.as_ptr(), offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = 
"sve")] +unsafe fn test_svldff1ub_gather_s64offset_s64() { + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_s64offset_s64(svptrue_b8(), U8_DATA.as_ptr(), offsets); + let loaded = svldff1ub_gather_s64offset_s64(svptrue_b8(), U8_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_s64offset_s64() { + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_s64offset_s64(svptrue_b16(), U16_DATA.as_ptr(), offsets); + let loaded = svldff1uh_gather_s64offset_s64(svptrue_b16(), U16_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_s64offset_s64() { + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uw_gather_s64offset_s64(svptrue_b32(), U32_DATA.as_ptr(), offsets); + let loaded = svldff1uw_gather_s64offset_s64(svptrue_b32(), U32_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_s64offset_u64() { + let offsets = svindex_s64(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_s64offset_u64(svptrue_b8(), U8_DATA.as_ptr(), offsets); + let loaded = svldff1ub_gather_s64offset_u64(svptrue_b8(), U8_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_s64offset_u64() { + let offsets = svindex_s64(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_s64offset_u64(svptrue_b16(), U16_DATA.as_ptr(), offsets); + let 
loaded = svldff1uh_gather_s64offset_u64(svptrue_b16(), U16_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_s64offset_u64() { + let offsets = svindex_s64(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uw_gather_s64offset_u64(svptrue_b32(), U32_DATA.as_ptr(), offsets); + let loaded = svldff1uw_gather_s64offset_u64(svptrue_b32(), U32_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_u32offset_s32() { + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_u32offset_s32(svptrue_b8(), U8_DATA.as_ptr(), offsets); + let loaded = svldff1ub_gather_u32offset_s32(svptrue_b8(), U8_DATA.as_ptr(), offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u32offset_s32() { + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_u32offset_s32(svptrue_b16(), U16_DATA.as_ptr(), offsets); + let loaded = svldff1uh_gather_u32offset_s32(svptrue_b16(), U16_DATA.as_ptr(), offsets); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_u32offset_u32() { + let offsets = svindex_u32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_u32offset_u32(svptrue_b8(), U8_DATA.as_ptr(), offsets); + let loaded = svldff1ub_gather_u32offset_u32(svptrue_b8(), U8_DATA.as_ptr(), offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable 
= "sve")] +unsafe fn test_svldff1uh_gather_u32offset_u32() { + let offsets = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_u32offset_u32(svptrue_b16(), U16_DATA.as_ptr(), offsets); + let loaded = svldff1uh_gather_u32offset_u32(svptrue_b16(), U16_DATA.as_ptr(), offsets); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_u64offset_s64() { + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_u64offset_s64(svptrue_b8(), U8_DATA.as_ptr(), offsets); + let loaded = svldff1ub_gather_u64offset_s64(svptrue_b8(), U8_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u64offset_s64() { + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_u64offset_s64(svptrue_b16(), U16_DATA.as_ptr(), offsets); + let loaded = svldff1uh_gather_u64offset_s64(svptrue_b16(), U16_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_u64offset_s64() { + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uw_gather_u64offset_s64(svptrue_b32(), U32_DATA.as_ptr(), offsets); + let loaded = svldff1uw_gather_u64offset_s64(svptrue_b32(), U32_DATA.as_ptr(), offsets); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_u64offset_u64() { + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_u64offset_u64(svptrue_b8(), U8_DATA.as_ptr(), offsets); + let 
loaded = svldff1ub_gather_u64offset_u64(svptrue_b8(), U8_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u64offset_u64() { + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_u64offset_u64(svptrue_b16(), U16_DATA.as_ptr(), offsets); + let loaded = svldff1uh_gather_u64offset_u64(svptrue_b16(), U16_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_u64offset_u64() { + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uw_gather_u64offset_u64(svptrue_b32(), U32_DATA.as_ptr(), offsets); + let loaded = svldff1uw_gather_u64offset_u64(svptrue_b32(), U32_DATA.as_ptr(), offsets); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_u32base_offset_s32() { + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_u32base_offset_s32( + svptrue_b8(), + bases, + U8_DATA.as_ptr() as i64 + 1u32 as i64, + ); + let loaded = svldff1ub_gather_u32base_offset_s32( + svptrue_b8(), + bases, + U8_DATA.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u32base_offset_s32() { + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + U16_DATA.as_ptr() as i64 + 2u32 as i64, + ); + let loaded = svldff1uh_gather_u32base_offset_s32( + svptrue_b16(), + bases, + U16_DATA.as_ptr() as i64 + 2u32 as 
i64, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_u32base_offset_u32() { + let bases = svindex_u32(0, 1u32.try_into().unwrap()); + svsetffr(); + let _ = svld1ub_gather_u32base_offset_u32( + svptrue_b8(), + bases, + U8_DATA.as_ptr() as i64 + 1u32 as i64, + ); + let loaded = svldff1ub_gather_u32base_offset_u32( + svptrue_b8(), + bases, + U8_DATA.as_ptr() as i64 + 1u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u32base_offset_u32() { + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + U16_DATA.as_ptr() as i64 + 2u32 as i64, + ); + let loaded = svldff1uh_gather_u32base_offset_u32( + svptrue_b16(), + bases, + U16_DATA.as_ptr() as i64 + 2u32 as i64, + ); + assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_u64base_offset_s64() { + let bases = svdup_n_u64(U8_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svsetffr(); + let _ = svld1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + let loaded = svldff1ub_gather_u64base_offset_s64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u64base_offset_s64() { + let bases = svdup_n_u64(U16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + 
svsetffr(); + let _ = svld1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + let loaded = + svldff1uh_gather_u64base_offset_s64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_u64base_offset_s64() { + let bases = svdup_n_u64(U32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svsetffr(); + let _ = svld1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + let loaded = + svldff1uw_gather_u64base_offset_s64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1ub_gather_u64base_offset_u64() { + let bases = svdup_n_u64(U8_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); + svsetffr(); + let _ = svld1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + let loaded = svldff1ub_gather_u64base_offset_u64(svptrue_b8(), bases, 1u32.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u64base_offset_u64() { + let bases = svdup_n_u64(U16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svsetffr(); + let _ = svld1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + let loaded = + svldff1uh_gather_u64base_offset_u64(svptrue_b16(), bases, 2u32.try_into().unwrap()); + assert_vector_matches_u64( loaded, - 
svindex_s64( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sw_vnum_s64() { +unsafe fn test_svldff1uw_gather_u64base_offset_u64() { + let bases = svdup_n_u64(U32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); svsetffr(); - let _ = svld1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); - let loaded = svldff1sw_vnum_s64(svptrue_b32(), I32_DATA.as_ptr(), 1); - let len = svcntd() as usize; - assert_vector_matches_i64( + let _ = svld1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + let loaded = + svldff1uw_gather_u64base_offset_u64(svptrue_b32(), bases, 4u32.try_into().unwrap()); + assert_vector_matches_u64( loaded, - svindex_s64( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_vnum_u16() { +unsafe fn test_svldff1ub_gather_u64base_s64() { + let bases = svdup_n_u64(U8_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); svsetffr(); - let _ = svld1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); - let loaded = svldff1sb_vnum_u16(svptrue_b8(), I8_DATA.as_ptr(), 1); - let len = svcnth() as usize; - assert_vector_matches_u16( + let _ = svld1ub_gather_u64base_s64(svptrue_b8(), bases); + let loaded = svldff1ub_gather_u64base_s64(svptrue_b8(), bases); + assert_vector_matches_i64( loaded, - svindex_u16( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_vnum_u32() { +unsafe fn test_svldff1uh_gather_u64base_s64() { + 
let bases = svdup_n_u64(U16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); svsetffr(); - let _ = svld1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); - let loaded = svldff1sb_vnum_u32(svptrue_b8(), I8_DATA.as_ptr(), 1); - let len = svcntw() as usize; - assert_vector_matches_u32( + let _ = svld1uh_gather_u64base_s64(svptrue_b16(), bases); + let loaded = svldff1uh_gather_u64base_s64(svptrue_b16(), bases); + assert_vector_matches_i64( loaded, - svindex_u32( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sh_vnum_u32() { +unsafe fn test_svldff1uw_gather_u64base_s64() { + let bases = svdup_n_u64(U32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); svsetffr(); - let _ = svld1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); - let loaded = svldff1sh_vnum_u32(svptrue_b16(), I16_DATA.as_ptr(), 1); - let len = svcntw() as usize; - assert_vector_matches_u32( + let _ = svld1uw_gather_u64base_s64(svptrue_b32(), bases); + let loaded = svldff1uw_gather_u64base_s64(svptrue_b32(), bases); + assert_vector_matches_i64( loaded, - svindex_u32( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sb_vnum_u64() { +unsafe fn test_svldff1ub_gather_u64base_u64() { + let bases = svdup_n_u64(U8_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 1u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b8(), bases, offsets); svsetffr(); - let _ = svld1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); - let loaded = svldff1sb_vnum_u64(svptrue_b8(), I8_DATA.as_ptr(), 1); - let len = svcntd() as usize; + let _ = 
svld1ub_gather_u64base_u64(svptrue_b8(), bases); + let loaded = svldff1ub_gather_u64base_u64(svptrue_b8(), bases); assert_vector_matches_u64( loaded, - svindex_u64( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sh_vnum_u64() { +unsafe fn test_svldff1uh_gather_u64base_u64() { + let bases = svdup_n_u64(U16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); svsetffr(); - let _ = svld1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); - let loaded = svldff1sh_vnum_u64(svptrue_b16(), I16_DATA.as_ptr(), 1); - let len = svcntd() as usize; + let _ = svld1uh_gather_u64base_u64(svptrue_b16(), bases); + let loaded = svldff1uh_gather_u64base_u64(svptrue_b16(), bases); assert_vector_matches_u64( loaded, - svindex_u64( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] -unsafe fn test_svldff1sw_vnum_u64() { +unsafe fn test_svldff1uw_gather_u64base_u64() { + let bases = svdup_n_u64(U32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); svsetffr(); - let _ = svld1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); - let loaded = svldff1sw_vnum_u64(svptrue_b32(), I32_DATA.as_ptr(), 1); - let len = svcntd() as usize; + let _ = svld1uw_gather_u64base_u64(svptrue_b32(), bases); + let loaded = svldff1uw_gather_u64base_u64(svptrue_b32(), bases); assert_vector_matches_u64( loaded, - svindex_u64( - (len + 0usize).try_into().unwrap(), - 1usize.try_into().unwrap(), - ), + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), ); } #[simd_test(enable = "sve")] @@ -7789,6 +9457,228 @@ unsafe fn test_svldff1uw_vnum_u64() { ); } 
#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_s32index_s32() { + let indices = svindex_s32(0, 1); + svsetffr(); + let _ = svld1uh_gather_s32index_s32(svptrue_b16(), U16_DATA.as_ptr(), indices); + let loaded = svldff1uh_gather_s32index_s32(svptrue_b16(), U16_DATA.as_ptr(), indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_s32index_u32() { + let indices = svindex_s32(0, 1); + svsetffr(); + let _ = svld1uh_gather_s32index_u32(svptrue_b16(), U16_DATA.as_ptr(), indices); + let loaded = svldff1uh_gather_s32index_u32(svptrue_b16(), U16_DATA.as_ptr(), indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_s64index_s64() { + let indices = svindex_s64(0, 1); + svsetffr(); + let _ = svld1uh_gather_s64index_s64(svptrue_b16(), U16_DATA.as_ptr(), indices); + let loaded = svldff1uh_gather_s64index_s64(svptrue_b16(), U16_DATA.as_ptr(), indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_s64index_s64() { + let indices = svindex_s64(0, 1); + svsetffr(); + let _ = svld1uw_gather_s64index_s64(svptrue_b32(), U32_DATA.as_ptr(), indices); + let loaded = svldff1uw_gather_s64index_s64(svptrue_b32(), U32_DATA.as_ptr(), indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_s64index_u64() { + let indices = svindex_s64(0, 1); + svsetffr(); + let _ = svld1uh_gather_s64index_u64(svptrue_b16(), U16_DATA.as_ptr(), indices); + let loaded = svldff1uh_gather_s64index_u64(svptrue_b16(), U16_DATA.as_ptr(), indices); + 
assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_s64index_u64() { + let indices = svindex_s64(0, 1); + svsetffr(); + let _ = svld1uw_gather_s64index_u64(svptrue_b32(), U32_DATA.as_ptr(), indices); + let loaded = svldff1uw_gather_s64index_u64(svptrue_b32(), U32_DATA.as_ptr(), indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u32index_s32() { + let indices = svindex_u32(0, 1); + svsetffr(); + let _ = svld1uh_gather_u32index_s32(svptrue_b16(), U16_DATA.as_ptr(), indices); + let loaded = svldff1uh_gather_u32index_s32(svptrue_b16(), U16_DATA.as_ptr(), indices); + assert_vector_matches_i32( + loaded, + svindex_s32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u32index_u32() { + let indices = svindex_u32(0, 1); + svsetffr(); + let _ = svld1uh_gather_u32index_u32(svptrue_b16(), U16_DATA.as_ptr(), indices); + let loaded = svldff1uh_gather_u32index_u32(svptrue_b16(), U16_DATA.as_ptr(), indices); + assert_vector_matches_u32( + loaded, + svindex_u32((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u64index_s64() { + let indices = svindex_u64(0, 1); + svsetffr(); + let _ = svld1uh_gather_u64index_s64(svptrue_b16(), U16_DATA.as_ptr(), indices); + let loaded = svldff1uh_gather_u64index_s64(svptrue_b16(), U16_DATA.as_ptr(), indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_u64index_s64() { + let indices = svindex_u64(0, 1); + svsetffr(); + let _ = svld1uw_gather_u64index_s64(svptrue_b32(), U32_DATA.as_ptr(), 
indices); + let loaded = svldff1uw_gather_u64index_s64(svptrue_b32(), U32_DATA.as_ptr(), indices); + assert_vector_matches_i64( + loaded, + svindex_s64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u64index_u64() { + let indices = svindex_u64(0, 1); + svsetffr(); + let _ = svld1uh_gather_u64index_u64(svptrue_b16(), U16_DATA.as_ptr(), indices); + let loaded = svldff1uh_gather_u64index_u64(svptrue_b16(), U16_DATA.as_ptr(), indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_u64index_u64() { + let indices = svindex_u64(0, 1); + svsetffr(); + let _ = svld1uw_gather_u64index_u64(svptrue_b32(), U32_DATA.as_ptr(), indices); + let loaded = svldff1uw_gather_u64index_u64(svptrue_b32(), U32_DATA.as_ptr(), indices); + assert_vector_matches_u64( + loaded, + svindex_u64((0usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u32base_index_s32() { + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_u32base_index_s32( + svptrue_b16(), + bases, + U16_DATA.as_ptr() as i64 / (2u32 as i64) + 1, + ); + let loaded = svldff1uh_gather_u32base_index_s32( + svptrue_b16(), + bases, + U16_DATA.as_ptr() as i64 / (2u32 as i64) + 1, + ); + assert_vector_matches_i32( + loaded, + svindex_s32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u32base_index_u32() { + let bases = svindex_u32(0, 2u32.try_into().unwrap()); + svsetffr(); + let _ = svld1uh_gather_u32base_index_u32( + svptrue_b16(), + bases, + U16_DATA.as_ptr() as i64 / (2u32 as i64) + 1, + ); + let loaded = svldff1uh_gather_u32base_index_u32( + svptrue_b16(), + bases, + U16_DATA.as_ptr() as i64 / (2u32 as i64) + 1, + ); + 
assert_vector_matches_u32( + loaded, + svindex_u32((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u64base_index_s64() { + let bases = svdup_n_u64(U16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svsetffr(); + let _ = svld1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + let loaded = svldff1uh_gather_u64base_index_s64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_u64base_index_s64() { + let bases = svdup_n_u64(U32_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svsetffr(); + let _ = svld1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + let loaded = svldff1uw_gather_u64base_index_s64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_i64( + loaded, + svindex_s64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uh_gather_u64base_index_u64() { + let bases = svdup_n_u64(U16_DATA.as_ptr() as u64); + let offsets = svindex_u64(0, 2u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b16(), bases, offsets); + svsetffr(); + let _ = svld1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + let loaded = svldff1uh_gather_u64base_index_u64(svptrue_b16(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] +unsafe fn test_svldff1uw_gather_u64base_index_u64() { + let bases = svdup_n_u64(U32_DATA.as_ptr() as u64); + let offsets = 
svindex_u64(0, 4u32.try_into().unwrap()); + let bases = svadd_u64_x(svptrue_b32(), bases, offsets); + svsetffr(); + let _ = svld1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + let loaded = svldff1uw_gather_u64base_index_u64(svptrue_b32(), bases, 1.try_into().unwrap()); + assert_vector_matches_u64( + loaded, + svindex_u64((1usize).try_into().unwrap(), 1usize.try_into().unwrap()), + ); +} +#[simd_test(enable = "sve")] unsafe fn test_svldnf1_f32() { svsetffr(); let _ = svld1_f32(svptrue_b32(), F32_DATA.as_ptr()); diff --git a/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs b/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs index 0f4de83dacb4a..cbd5df50de5b6 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/load_store_tests.rs @@ -141,13 +141,6 @@ fn generate_single_test( } } - if fn_name.starts_with("svldff1") && fn_name.contains("gather") { - // TODO: We can remove this check when first-faulting gathers are fixed in CI's QEMU - // https://gitlab.com/qemu-project/qemu/-/issues/1612 - println!("Skipping test for {fn_name}"); - return Ok(quote!()); - } - let fn_ident = format_ident!("{fn_name}"); let test_name = format_ident!( "test_{fn_name}{}", From 9671a92ea645dced89735cfae6f02ebf0476e28f Mon Sep 17 00:00:00 2001 From: sayantn Date: Tue, 14 Apr 2026 14:38:28 +0530 Subject: [PATCH 11/30] Remove uses of asm --- .../crates/core_arch/src/x86/avx512bf16.rs | 38 +---- .../crates/core_arch/src/x86/avx512fp16.rs | 154 ++++-------------- .../crates/core_arch/src/x86/avxneconvert.rs | 28 +--- 3 files changed, 43 insertions(+), 177 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs b/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs index 66eef063eed8b..8d944f5ba817b 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bf16.rs @@ -2,7 +2,6 @@ //! 
//! [AVX512BF16 intrinsics]: https://software.intel.com/sites/landingpage/IntrinsicsGuide/#expand=1769&avx512techs=AVX512_BF16 -use crate::arch::asm; use crate::core_arch::{simd::*, x86::*}; use crate::intrinsics::simd::*; @@ -17,6 +16,8 @@ unsafe extern "C" { fn cvtne2ps2bf16_256(a: f32x8, b: f32x8) -> i16x16; #[link_name = "llvm.x86.avx512bf16.cvtne2ps2bf16.512"] fn cvtne2ps2bf16_512(a: f32x16, b: f32x16) -> i16x32; + #[link_name = "llvm.x86.avx512bf16.mask.cvtneps2bf16.128"] + fn cvtneps2bf16_128(a: f32x4, src: i16x8, k: __mmask8) -> i16x8; #[link_name = "llvm.x86.avx512bf16.cvtneps2bf16.256"] fn cvtneps2bf16_256(a: f32x8) -> i16x8; #[link_name = "llvm.x86.avx512bf16.cvtneps2bf16.512"] @@ -519,16 +520,7 @@ pub fn _mm_cvtsbh_ss(a: bf16) -> f32 { #[cfg_attr(test, assert_instr("vcvtneps2bf16"))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_cvtneps_pbh(a: __m128) -> __m128bh { - unsafe { - let mut dst: __m128bh; - asm!( - "vcvtneps2bf16 {dst}, {src}", - dst = lateout(xmm_reg) dst, - src = in(xmm_reg) a, - options(pure, nomem, nostack, preserves_flags) - ); - dst - } + _mm_mask_cvtneps_pbh(__m128bh::splat(0), !0, a) } /// Converts packed single-precision (32-bit) floating-point elements in a to packed BF16 (16-bit) @@ -541,17 +533,7 @@ pub fn _mm_cvtneps_pbh(a: __m128) -> __m128bh { #[cfg_attr(test, assert_instr("vcvtneps2bf16"))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_cvtneps_pbh(src: __m128bh, k: __mmask8, a: __m128) -> __m128bh { - unsafe { - let mut dst = src; - asm!( - "vcvtneps2bf16 {dst}{{{k}}},{src}", - dst = inlateout(xmm_reg) dst, - src = in(xmm_reg) a, - k = in(kreg) k, - options(pure, nomem, nostack, preserves_flags) - ); - dst - } + unsafe { cvtneps2bf16_128(a.as_f32x4(), src.as_i16x8(), k).as_m128bh() } } /// Converts packed single-precision (32-bit) floating-point elements in a to packed BF16 (16-bit) @@ -564,17 +546,7 @@ pub fn _mm_mask_cvtneps_pbh(src: __m128bh, k: __mmask8, a: __m128) -> 
__m128bh { #[cfg_attr(test, assert_instr("vcvtneps2bf16"))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_maskz_cvtneps_pbh(k: __mmask8, a: __m128) -> __m128bh { - unsafe { - let mut dst: __m128bh; - asm!( - "vcvtneps2bf16 {dst}{{{k}}}{{z}},{src}", - dst = lateout(xmm_reg) dst, - src = in(xmm_reg) a, - k = in(kreg) k, - options(pure, nomem, nostack, preserves_flags) - ); - dst - } + _mm_mask_cvtneps_pbh(__m128bh::splat(0), k, a) } /// Converts a single-precision (32-bit) floating-point element in a to a BF16 (16-bit) floating-point diff --git a/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs b/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs index 8ddc3d29a3a11..6523e98d0ca8d 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512fp16.rs @@ -695,34 +695,6 @@ pub const fn _mm512_zextph128_ph512(a: __m128h) -> __m512h { } } -macro_rules! cmp_asm { // FIXME: use LLVM intrinsics - ($mask_type: ty, $reg: ident, $a: expr, $b: expr) => {{ - let dst: $mask_type; - asm!( - "vcmpph {k}, {a}, {b}, {imm8}", - k = lateout(kreg) dst, - a = in($reg) $a, - b = in($reg) $b, - imm8 = const IMM5, - options(pure, nomem, nostack) - ); - dst - }}; - ($mask_type: ty, $mask: expr, $reg: ident, $a: expr, $b: expr) => {{ - let dst: $mask_type; - asm!( - "vcmpph {k} {{ {mask} }}, {a}, {b}, {imm8}", - k = lateout(kreg) dst, - mask = in(kreg) $mask, - a = in($reg) $a, - b = in($reg) $b, - imm8 = const IMM5, - options(pure, nomem, nostack) - ); - dst - }}; -} - /// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison /// operand specified by imm8, and store the results in mask vector k. /// @@ -732,10 +704,7 @@ macro_rules! 
cmp_asm { // FIXME: use LLVM intrinsics #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")] pub fn _mm_cmp_ph_mask(a: __m128h, b: __m128h) -> __mmask8 { - unsafe { - static_assert_uimm_bits!(IMM5, 5); - cmp_asm!(__mmask8, xmm_reg, a, b) - } + _mm_mask_cmp_ph_mask::(!0, a, b) } /// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison @@ -750,7 +719,7 @@ pub fn _mm_cmp_ph_mask(a: __m128h, b: __m128h) -> __mmask8 { pub fn _mm_mask_cmp_ph_mask(k1: __mmask8, a: __m128h, b: __m128h) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM5, 5); - cmp_asm!(__mmask8, k1, xmm_reg, a, b) + vcmpph_128(a, b, IMM5, k1) } } @@ -763,10 +732,7 @@ pub fn _mm_mask_cmp_ph_mask(k1: __mmask8, a: __m128h, b: __m128 #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")] pub fn _mm256_cmp_ph_mask(a: __m256h, b: __m256h) -> __mmask16 { - unsafe { - static_assert_uimm_bits!(IMM5, 5); - cmp_asm!(__mmask16, ymm_reg, a, b) - } + _mm256_mask_cmp_ph_mask::(!0, a, b) } /// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison @@ -785,7 +751,7 @@ pub fn _mm256_mask_cmp_ph_mask( ) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM5, 5); - cmp_asm!(__mmask16, k1, ymm_reg, a, b) + vcmpph_256(a, b, IMM5, k1) } } @@ -798,10 +764,7 @@ pub fn _mm256_mask_cmp_ph_mask( #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")] pub fn _mm512_cmp_ph_mask(a: __m512h, b: __m512h) -> __mmask32 { - unsafe { - static_assert_uimm_bits!(IMM5, 5); - cmp_asm!(__mmask32, zmm_reg, a, b) - } + _mm512_mask_cmp_ph_mask::(!0, a, b) } /// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison @@ -818,10 +781,7 @@ pub fn _mm512_mask_cmp_ph_mask( a: __m512h, b: __m512h, ) -> __mmask32 { - unsafe { - static_assert_uimm_bits!(IMM5, 5); - cmp_asm!(__mmask32, k1, zmm_reg, a, b) 
- } + _mm512_mask_cmp_round_ph_mask::(k1, a, b) } /// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison @@ -838,24 +798,7 @@ pub fn _mm512_cmp_round_ph_mask( a: __m512h, b: __m512h, ) -> __mmask32 { - unsafe { - static_assert_uimm_bits!(IMM5, 5); - static_assert_sae!(SAE); - if SAE == _MM_FROUND_NO_EXC { - let dst: __mmask32; - asm!( - "vcmpph {k}, {a}, {b}, {{sae}}, {imm8}", - k = lateout(kreg) dst, - a = in(zmm_reg) a, - b = in(zmm_reg) b, - imm8 = const IMM5, - options(pure, nomem, nostack) - ); - dst - } else { - cmp_asm!(__mmask32, zmm_reg, a, b) - } - } + _mm512_mask_cmp_round_ph_mask::(!0, a, b) } /// Compare packed half-precision (16-bit) floating-point elements in a and b based on the comparison @@ -877,21 +820,7 @@ pub fn _mm512_mask_cmp_round_ph_mask( unsafe { static_assert_uimm_bits!(IMM5, 5); static_assert_sae!(SAE); - if SAE == _MM_FROUND_NO_EXC { - let dst: __mmask32; - asm!( - "vcmpph {k} {{{k1}}}, {a}, {b}, {{sae}}, {imm8}", - k = lateout(kreg) dst, - k1 = in(kreg) k1, - a = in(zmm_reg) a, - b = in(zmm_reg) b, - imm8 = const IMM5, - options(pure, nomem, nostack) - ); - dst - } else { - cmp_asm!(__mmask32, k1, zmm_reg, a, b) - } + vcmpph_512(a, b, IMM5, k1, SAE) } } @@ -11538,32 +11467,6 @@ pub fn _mm512_reduce_max_ph(a: __m512h) -> f16 { } } -macro_rules! 
fpclass_asm { // FIXME: use LLVM intrinsics - ($mask_type: ty, $reg: ident, $a: expr) => {{ - let dst: $mask_type; - asm!( - "vfpclassph {k}, {src}, {imm8}", - k = lateout(kreg) dst, - src = in($reg) $a, - imm8 = const IMM8, - options(pure, nomem, nostack) - ); - dst - }}; - ($mask_type: ty, $mask: expr, $reg: ident, $a: expr) => {{ - let dst: $mask_type; - asm!( - "vfpclassph {k} {{ {mask} }}, {src}, {imm8}", - k = lateout(kreg) dst, - mask = in(kreg) $mask, - src = in($reg) $a, - imm8 = const IMM8, - options(pure, nomem, nostack) - ); - dst - }}; -} - /// Test packed half-precision (16-bit) floating-point elements in a for special categories specified /// by imm8, and store the results in mask vector k. /// imm can be a combination of: @@ -11586,7 +11489,7 @@ macro_rules! fpclass_asm { // FIXME: use LLVM intrinsics pub fn _mm_fpclass_ph_mask(a: __m128h) -> __mmask8 { unsafe { static_assert_uimm_bits!(IMM8, 8); - fpclass_asm!(__mmask8, xmm_reg, a) + vfpclassph_128(a, IMM8) } } @@ -11611,10 +11514,7 @@ pub fn _mm_fpclass_ph_mask(a: __m128h) -> __mmask8 { #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")] pub fn _mm_mask_fpclass_ph_mask(k1: __mmask8, a: __m128h) -> __mmask8 { - unsafe { - static_assert_uimm_bits!(IMM8, 8); - fpclass_asm!(__mmask8, k1, xmm_reg, a) - } + _mm_fpclass_ph_mask::(a) & k1 } /// Test packed half-precision (16-bit) floating-point elements in a for special categories specified @@ -11639,7 +11539,7 @@ pub fn _mm_mask_fpclass_ph_mask(k1: __mmask8, a: __m128h) -> __ pub fn _mm256_fpclass_ph_mask(a: __m256h) -> __mmask16 { unsafe { static_assert_uimm_bits!(IMM8, 8); - fpclass_asm!(__mmask16, ymm_reg, a) + vfpclassph_256(a, IMM8) } } @@ -11664,10 +11564,7 @@ pub fn _mm256_fpclass_ph_mask(a: __m256h) -> __mmask16 { #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")] pub fn _mm256_mask_fpclass_ph_mask(k1: __mmask16, a: __m256h) -> __mmask16 { - unsafe { - 
static_assert_uimm_bits!(IMM8, 8); - fpclass_asm!(__mmask16, k1, ymm_reg, a) - } + _mm256_fpclass_ph_mask::(a) & k1 } /// Test packed half-precision (16-bit) floating-point elements in a for special categories specified @@ -11692,7 +11589,7 @@ pub fn _mm256_mask_fpclass_ph_mask(k1: __mmask16, a: __m256h) - pub fn _mm512_fpclass_ph_mask(a: __m512h) -> __mmask32 { unsafe { static_assert_uimm_bits!(IMM8, 8); - fpclass_asm!(__mmask32, zmm_reg, a) + vfpclassph_512(a, IMM8) } } @@ -11717,10 +11614,7 @@ pub fn _mm512_fpclass_ph_mask(a: __m512h) -> __mmask32 { #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512fp16", since = "1.94.0")] pub fn _mm512_mask_fpclass_ph_mask(k1: __mmask32, a: __m512h) -> __mmask32 { - unsafe { - static_assert_uimm_bits!(IMM8, 8); - fpclass_asm!(__mmask32, k1, zmm_reg, a) - } + _mm512_fpclass_ph_mask::(a) & k1 } /// Test the lower half-precision (16-bit) floating-point element in a for special categories specified @@ -16571,11 +16465,18 @@ pub const fn _mm_cvtsi16_si128(a: i16) -> __m128i { } #[allow(improper_ctypes)] -unsafe extern "C" { +unsafe extern "unadjusted" { + #[link_name = "llvm.x86.avx512fp16.mask.cmp.ph.128"] + fn vcmpph_128(a: __m128h, b: __m128h, imm5: i32, mask: __mmask8) -> __mmask8; + #[link_name = "llvm.x86.avx512fp16.mask.cmp.ph.256"] + fn vcmpph_256(a: __m256h, b: __m256h, imm5: i32, mask: __mmask16) -> __mmask16; + #[link_name = "llvm.x86.avx512fp16.mask.cmp.ph.512"] + fn vcmpph_512(a: __m512h, b: __m512h, imm5: i32, mask: __mmask32, sae: i32) -> __mmask32; + #[link_name = "llvm.x86.avx512fp16.mask.cmp.sh"] - fn vcmpsh(a: __m128h, b: __m128h, imm8: i32, mask: __mmask8, sae: i32) -> __mmask8; + fn vcmpsh(a: __m128h, b: __m128h, imm5: i32, mask: __mmask8, sae: i32) -> __mmask8; #[link_name = "llvm.x86.avx512fp16.vcomi.sh"] - fn vcomish(a: __m128h, b: __m128h, imm8: i32, sae: i32) -> i32; + fn vcomish(a: __m128h, b: __m128h, imm5: i32, sae: i32) -> i32; #[link_name = "llvm.x86.avx512fp16.add.ph.512"] fn 
vaddph(a: __m512h, b: __m512h, rounding: i32) -> __m512h; @@ -16758,6 +16659,13 @@ unsafe extern "C" { fn vreducesh(a: __m128h, b: __m128h, src: __m128h, k: __mmask8, imm8: i32, sae: i32) -> __m128h; + #[link_name = "llvm.x86.avx512fp16.fpclass.ph.128"] + fn vfpclassph_128(a: __m128h, imm8: i32) -> __mmask8; + #[link_name = "llvm.x86.avx512fp16.fpclass.ph.256"] + fn vfpclassph_256(a: __m256h, imm8: i32) -> __mmask16; + #[link_name = "llvm.x86.avx512fp16.fpclass.ph.512"] + fn vfpclassph_512(a: __m512h, imm8: i32) -> __mmask32; + #[link_name = "llvm.x86.avx512fp16.mask.fpclass.sh"] fn vfpclasssh(a: __m128h, imm8: i32, k: __mmask8) -> __mmask8; diff --git a/library/stdarch/crates/core_arch/src/x86/avxneconvert.rs b/library/stdarch/crates/core_arch/src/x86/avxneconvert.rs index b8a3b9473af9e..861213eb4257f 100644 --- a/library/stdarch/crates/core_arch/src/x86/avxneconvert.rs +++ b/library/stdarch/crates/core_arch/src/x86/avxneconvert.rs @@ -1,4 +1,3 @@ -use crate::arch::asm; use crate::core_arch::x86::*; #[cfg(test)] @@ -161,16 +160,7 @@ pub unsafe fn _mm256_cvtneoph_ps(a: *const __m256h) -> __m256 { #[cfg_attr(test, assert_instr(vcvtneps2bf16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_cvtneps_avx_pbh(a: __m128) -> __m128bh { - unsafe { - let mut dst: __m128bh; - asm!( - "{{vex}}vcvtneps2bf16 {dst},{src}", - dst = lateout(xmm_reg) dst, - src = in(xmm_reg) a, - options(pure, nomem, nostack, preserves_flags) - ); - dst - } + unsafe { vcvtneps2bf16_128(a) } } /// Convert packed single precision (32-bit) floating-point elements in a to packed BF16 (16-bit) floating-point @@ -182,16 +172,7 @@ pub fn _mm_cvtneps_avx_pbh(a: __m128) -> __m128bh { #[cfg_attr(test, assert_instr(vcvtneps2bf16))] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_cvtneps_avx_pbh(a: __m256) -> __m128bh { - unsafe { - let mut dst: __m128bh; - asm!( - "{{vex}}vcvtneps2bf16 {dst},{src}", - dst = lateout(xmm_reg) dst, - src = in(ymm_reg) a, - 
options(pure, nomem, nostack, preserves_flags) - ); - dst - } + unsafe { vcvtneps2bf16_256(a) } } #[allow(improper_ctypes)] @@ -222,6 +203,11 @@ unsafe extern "C" { fn cvtneoph2ps_128(a: *const __m128h) -> __m128; #[link_name = "llvm.x86.vcvtneoph2ps256"] fn cvtneoph2ps_256(a: *const __m256h) -> __m256; + + #[link_name = "llvm.x86.vcvtneps2bf16128"] + fn vcvtneps2bf16_128(a: __m128) -> __m128bh; + #[link_name = "llvm.x86.vcvtneps2bf16256"] + fn vcvtneps2bf16_256(a: __m256) -> __m128bh; } #[cfg(test)] From 00ca9ec02d53cd5f6046e6bdd23d81a2eabfd7de Mon Sep 17 00:00:00 2001 From: sayantn Date: Thu, 16 Apr 2026 11:23:54 +0530 Subject: [PATCH 12/30] Remove uses of deprecated intrinsics --- .../core_arch/src/aarch64/sve/generated.rs | 32 ++++----- .../crates/core_arch/src/x86/avx512bitalg.rs | 24 +++---- .../crates/core_arch/src/x86/avx512dq.rs | 68 +++++++------------ .../crates/core_arch/src/x86/avx512f.rs | 18 ++--- .../stdarch-gen-arm/spec/sve/aarch64.spec.yml | 12 ++-- 5 files changed, 65 insertions(+), 89 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs index ed28e98a813ea..5f26d61e7c84f 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs @@ -9799,7 +9799,7 @@ pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t { unsafe extern "unadjusted" { #[cfg_attr( target_arch = "aarch64", - link_name = "llvm.experimental.vector.insert.nxv4f32.v4f32" + link_name = "llvm.vector.insert.nxv4f32.v4f32" )] fn _svdupq_n_f32(op0: svfloat32_t, op1: float32x4_t, idx: i64) -> svfloat32_t; } @@ -9817,7 +9817,7 @@ pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t { unsafe extern "unadjusted" { #[cfg_attr( target_arch = "aarch64", - link_name = "llvm.experimental.vector.insert.nxv4i32.v4i32" + link_name = "llvm.vector.insert.nxv4i32.v4i32" )] fn 
_svdupq_n_s32(op0: svint32_t, op1: int32x4_t, idx: i64) -> svint32_t; } @@ -9851,7 +9851,7 @@ pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t { unsafe extern "unadjusted" { #[cfg_attr( target_arch = "aarch64", - link_name = "llvm.experimental.vector.insert.nxv2f64.v2f64" + link_name = "llvm.vector.insert.nxv2f64.v2f64" )] fn _svdupq_n_f64(op0: svfloat64_t, op1: float64x2_t, idx: i64) -> svfloat64_t; } @@ -9869,7 +9869,7 @@ pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t { unsafe extern "unadjusted" { #[cfg_attr( target_arch = "aarch64", - link_name = "llvm.experimental.vector.insert.nxv2i64.v2i64" + link_name = "llvm.vector.insert.nxv2i64.v2i64" )] fn _svdupq_n_s64(op0: svint64_t, op1: int64x2_t, idx: i64) -> svint64_t; } @@ -9904,7 +9904,7 @@ pub fn svdupq_n_s16( unsafe extern "unadjusted" { #[cfg_attr( target_arch = "aarch64", - link_name = "llvm.experimental.vector.insert.nxv8i16.v8i16" + link_name = "llvm.vector.insert.nxv8i16.v8i16" )] fn _svdupq_n_s16(op0: svint16_t, op1: int16x8_t, idx: i64) -> svint16_t; } @@ -9972,7 +9972,7 @@ pub fn svdupq_n_s8( unsafe extern "unadjusted" { #[cfg_attr( target_arch = "aarch64", - link_name = "llvm.experimental.vector.insert.nxv16i8.v16i8" + link_name = "llvm.vector.insert.nxv16i8.v16i8" )] fn _svdupq_n_s8(op0: svint8_t, op1: int8x16_t, idx: i64) -> svint8_t; } @@ -35208,7 +35208,7 @@ pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_b8(op: svbool_t) -> svbool_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i1")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv16i1")] fn _svrev_b8(op: svbool_t) -> svbool_t; } unsafe { _svrev_b8(op) } @@ -35221,7 +35221,7 @@ pub fn svrev_b8(op: svbool_t) -> svbool_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_b16(op: svbool_t) -> svbool_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = 
"llvm.aarch64.sve.rev.nxv8i1")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv8i1")] fn _svrev_b16(op: svbool8_t) -> svbool8_t; } unsafe { _svrev_b16(op.sve_into()).sve_into() } @@ -35234,7 +35234,7 @@ pub fn svrev_b16(op: svbool_t) -> svbool_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_b32(op: svbool_t) -> svbool_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i1")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv4i1")] fn _svrev_b32(op: svbool4_t) -> svbool4_t; } unsafe { _svrev_b32(op.sve_into()).sve_into() } @@ -35247,7 +35247,7 @@ pub fn svrev_b32(op: svbool_t) -> svbool_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_b64(op: svbool_t) -> svbool_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i1")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv2i1")] fn _svrev_b64(op: svbool2_t) -> svbool2_t; } unsafe { _svrev_b64(op.sve_into()).sve_into() } @@ -35260,7 +35260,7 @@ pub fn svrev_b64(op: svbool_t) -> svbool_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4f32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv4f32")] fn _svrev_f32(op: svfloat32_t) -> svfloat32_t; } unsafe { _svrev_f32(op) } @@ -35273,7 +35273,7 @@ pub fn svrev_f32(op: svfloat32_t) -> svfloat32_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2f64")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv2f64")] fn _svrev_f64(op: svfloat64_t) -> svfloat64_t; } unsafe { _svrev_f64(op) } @@ -35286,7 +35286,7 @@ pub fn svrev_f64(op: svfloat64_t) -> 
svfloat64_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_s8(op: svint8_t) -> svint8_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv16i8")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv16i8")] fn _svrev_s8(op: svint8_t) -> svint8_t; } unsafe { _svrev_s8(op) } @@ -35299,7 +35299,7 @@ pub fn svrev_s8(op: svint8_t) -> svint8_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_s16(op: svint16_t) -> svint16_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv8i16")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv8i16")] fn _svrev_s16(op: svint16_t) -> svint16_t; } unsafe { _svrev_s16(op) } @@ -35312,7 +35312,7 @@ pub fn svrev_s16(op: svint16_t) -> svint16_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_s32(op: svint32_t) -> svint32_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv4i32")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv4i32")] fn _svrev_s32(op: svint32_t) -> svint32_t; } unsafe { _svrev_s32(op) } @@ -35325,7 +35325,7 @@ pub fn svrev_s32(op: svint32_t) -> svint32_t { #[cfg_attr(test, assert_instr(rev))] pub fn svrev_s64(op: svint64_t) -> svint64_t { unsafe extern "unadjusted" { - #[cfg_attr(target_arch = "aarch64", link_name = "llvm.aarch64.sve.rev.nxv2i64")] + #[cfg_attr(target_arch = "aarch64", link_name = "llvm.vector.reverse.nxv2i64")] fn _svrev_s64(op: svint64_t) -> svint64_t; } unsafe { _svrev_s64(op) } diff --git a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs index 6dd4e6b33a3ba..dd211854afbb4 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512bitalg.rs @@ -28,12 +28,12 @@ use stdarch_test::assert_instr; #[allow(improper_ctypes)] unsafe 
extern "C" { - #[link_name = "llvm.x86.avx512.mask.vpshufbitqmb.512"] - fn bitshuffle_512(data: i8x64, indices: i8x64, mask: __mmask64) -> __mmask64; - #[link_name = "llvm.x86.avx512.mask.vpshufbitqmb.256"] - fn bitshuffle_256(data: i8x32, indices: i8x32, mask: __mmask32) -> __mmask32; - #[link_name = "llvm.x86.avx512.mask.vpshufbitqmb.128"] - fn bitshuffle_128(data: i8x16, indices: i8x16, mask: __mmask16) -> __mmask16; + #[link_name = "llvm.x86.avx512.vpshufbitqmb.512"] + fn bitshuffle_512(data: i8x64, indices: i8x64) -> __mmask64; + #[link_name = "llvm.x86.avx512.vpshufbitqmb.256"] + fn bitshuffle_256(data: i8x32, indices: i8x32) -> __mmask32; + #[link_name = "llvm.x86.avx512.vpshufbitqmb.128"] + fn bitshuffle_128(data: i8x16, indices: i8x16) -> __mmask16; } /// For each packed 16-bit integer maps the value to the number of logical 1 bits. @@ -370,7 +370,7 @@ pub const fn _mm_mask_popcnt_epi8(src: __m128i, k: __mmask16, a: __m128i) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub fn _mm512_bitshuffle_epi64_mask(b: __m512i, c: __m512i) -> __mmask64 { - unsafe { bitshuffle_512(b.as_i8x64(), c.as_i8x64(), !0) } + unsafe { bitshuffle_512(b.as_i8x64(), c.as_i8x64()) } } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. @@ -386,7 +386,7 @@ pub fn _mm512_bitshuffle_epi64_mask(b: __m512i, c: __m512i) -> __mmask64 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub fn _mm512_mask_bitshuffle_epi64_mask(k: __mmask64, b: __m512i, c: __m512i) -> __mmask64 { - unsafe { bitshuffle_512(b.as_i8x64(), c.as_i8x64(), k) } + _mm512_bitshuffle_epi64_mask(b, c) & k } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. 
@@ -399,7 +399,7 @@ pub fn _mm512_mask_bitshuffle_epi64_mask(k: __mmask64, b: __m512i, c: __m512i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub fn _mm256_bitshuffle_epi64_mask(b: __m256i, c: __m256i) -> __mmask32 { - unsafe { bitshuffle_256(b.as_i8x32(), c.as_i8x32(), !0) } + unsafe { bitshuffle_256(b.as_i8x32(), c.as_i8x32()) } } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. @@ -415,7 +415,7 @@ pub fn _mm256_bitshuffle_epi64_mask(b: __m256i, c: __m256i) -> __mmask32 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub fn _mm256_mask_bitshuffle_epi64_mask(k: __mmask32, b: __m256i, c: __m256i) -> __mmask32 { - unsafe { bitshuffle_256(b.as_i8x32(), c.as_i8x32(), k) } + _mm256_bitshuffle_epi64_mask(b, c) & k } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. @@ -428,7 +428,7 @@ pub fn _mm256_mask_bitshuffle_epi64_mask(k: __mmask32, b: __m256i, c: __m256i) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub fn _mm_bitshuffle_epi64_mask(b: __m128i, c: __m128i) -> __mmask16 { - unsafe { bitshuffle_128(b.as_i8x16(), c.as_i8x16(), !0) } + unsafe { bitshuffle_128(b.as_i8x16(), c.as_i8x16()) } } /// Considers the input `b` as packed 64-bit integers and `c` as packed 8-bit integers. 
@@ -444,7 +444,7 @@ pub fn _mm_bitshuffle_epi64_mask(b: __m128i, c: __m128i) -> __mmask16 { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vpshufbitqmb))] pub fn _mm_mask_bitshuffle_epi64_mask(k: __mmask16, b: __m128i, c: __m128i) -> __mmask16 { - unsafe { bitshuffle_128(b.as_i8x16(), c.as_i8x16(), k) } + _mm_bitshuffle_epi64_mask(b, c) & k } #[cfg(test)] diff --git a/library/stdarch/crates/core_arch/src/x86/avx512dq.rs b/library/stdarch/crates/core_arch/src/x86/avx512dq.rs index 9e1a4c0b29558..0b322c8b83c7b 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512dq.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512dq.rs @@ -6865,7 +6865,7 @@ pub fn _mm_maskz_reduce_ss(k: __mmask8, a: __m128, b: __m128) - #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_fpclass_pd_mask(a: __m128d) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); - _mm_mask_fpclass_pd_mask::(0xff, a) + unsafe { vfpclasspd_128(a.as_f64x2(), IMM8) } } /// Test packed double-precision (64-bit) floating-point elements in a for special categories specified @@ -6889,10 +6889,7 @@ pub fn _mm_fpclass_pd_mask(a: __m128d) -> __mmask8 { #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_fpclass_pd_mask(k1: __mmask8, a: __m128d) -> __mmask8 { - unsafe { - static_assert_uimm_bits!(IMM8, 8); - transmute(vfpclasspd_128(a.as_f64x2(), IMM8, k1)) - } + _mm_fpclass_pd_mask::(a) & k1 } /// Test packed double-precision (64-bit) floating-point elements in a for special categories specified @@ -6916,7 +6913,7 @@ pub fn _mm_mask_fpclass_pd_mask(k1: __mmask8, a: __m128d) -> __ #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_fpclass_pd_mask(a: __m256d) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); - _mm256_mask_fpclass_pd_mask::(0xff, a) + unsafe { vfpclasspd_256(a.as_f64x4(), IMM8) } } /// Test packed double-precision (64-bit) floating-point elements in a for special 
categories specified @@ -6940,10 +6937,7 @@ pub fn _mm256_fpclass_pd_mask(a: __m256d) -> __mmask8 { #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_mask_fpclass_pd_mask(k1: __mmask8, a: __m256d) -> __mmask8 { - unsafe { - static_assert_uimm_bits!(IMM8, 8); - transmute(vfpclasspd_256(a.as_f64x4(), IMM8, k1)) - } + _mm256_fpclass_pd_mask::(a) & k1 } /// Test packed double-precision (64-bit) floating-point elements in a for special categories specified @@ -6967,7 +6961,7 @@ pub fn _mm256_mask_fpclass_pd_mask(k1: __mmask8, a: __m256d) -> #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_fpclass_pd_mask(a: __m512d) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); - _mm512_mask_fpclass_pd_mask::(0xff, a) + unsafe { vfpclasspd_512(a.as_f64x8(), IMM8) } } /// Test packed double-precision (64-bit) floating-point elements in a for special categories specified @@ -6991,10 +6985,7 @@ pub fn _mm512_fpclass_pd_mask(a: __m512d) -> __mmask8 { #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_fpclass_pd_mask(k1: __mmask8, a: __m512d) -> __mmask8 { - unsafe { - static_assert_uimm_bits!(IMM8, 8); - transmute(vfpclasspd_512(a.as_f64x8(), IMM8, k1)) - } + _mm512_fpclass_pd_mask::(a) & k1 } /// Test packed single-precision (32-bit) floating-point elements in a for special categories specified @@ -7018,7 +7009,7 @@ pub fn _mm512_mask_fpclass_pd_mask(k1: __mmask8, a: __m512d) -> #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm_fpclass_ps_mask(a: __m128) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); - _mm_mask_fpclass_ps_mask::(0xff, a) + unsafe { vfpclassps_128(a.as_f32x4(), IMM8) } } /// Test packed single-precision (32-bit) floating-point elements in a for special categories specified @@ -7042,10 +7033,7 @@ pub fn _mm_fpclass_ps_mask(a: __m128) -> __mmask8 { #[rustc_legacy_const_generics(2)] #[stable(feature = 
"stdarch_x86_avx512", since = "1.89")] pub fn _mm_mask_fpclass_ps_mask(k1: __mmask8, a: __m128) -> __mmask8 { - unsafe { - static_assert_uimm_bits!(IMM8, 8); - transmute(vfpclassps_128(a.as_f32x4(), IMM8, k1)) - } + _mm_fpclass_ps_mask::(a) & k1 } /// Test packed single-precision (32-bit) floating-point elements in a for special categories specified @@ -7069,7 +7057,7 @@ pub fn _mm_mask_fpclass_ps_mask(k1: __mmask8, a: __m128) -> __m #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_fpclass_ps_mask(a: __m256) -> __mmask8 { static_assert_uimm_bits!(IMM8, 8); - _mm256_mask_fpclass_ps_mask::(0xff, a) + unsafe { vfpclassps_256(a.as_f32x8(), IMM8) } } /// Test packed single-precision (32-bit) floating-point elements in a for special categories specified @@ -7093,10 +7081,7 @@ pub fn _mm256_fpclass_ps_mask(a: __m256) -> __mmask8 { #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm256_mask_fpclass_ps_mask(k1: __mmask8, a: __m256) -> __mmask8 { - unsafe { - static_assert_uimm_bits!(IMM8, 8); - transmute(vfpclassps_256(a.as_f32x8(), IMM8, k1)) - } + _mm256_fpclass_ps_mask::(a) & k1 } /// Test packed single-precision (32-bit) floating-point elements in a for special categories specified @@ -7120,7 +7105,7 @@ pub fn _mm256_mask_fpclass_ps_mask(k1: __mmask8, a: __m256) -> #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_fpclass_ps_mask(a: __m512) -> __mmask16 { static_assert_uimm_bits!(IMM8, 8); - _mm512_mask_fpclass_ps_mask::(0xffff, a) + unsafe { vfpclassps_512(a.as_f32x16(), IMM8) } } /// Test packed single-precision (32-bit) floating-point elements in a for special categories specified @@ -7144,10 +7129,7 @@ pub fn _mm512_fpclass_ps_mask(a: __m512) -> __mmask16 { #[rustc_legacy_const_generics(2)] #[stable(feature = "stdarch_x86_avx512", since = "1.89")] pub fn _mm512_mask_fpclass_ps_mask(k1: __mmask16, a: __m512) -> __mmask16 { - unsafe { - static_assert_uimm_bits!(IMM8, 8); - 
transmute(vfpclassps_512(a.as_f32x16(), IMM8, k1)) - } + _mm512_fpclass_ps_mask::(a) & k1 } /// Test the lower double-precision (64-bit) floating-point element in a for special categories specified @@ -7377,19 +7359,19 @@ unsafe extern "C" { #[link_name = "llvm.x86.avx512.mask.reduce.ss"] fn vreducess(a: f32x4, b: f32x4, src: f32x4, k: __mmask8, imm8: i32, sae: i32) -> f32x4; - #[link_name = "llvm.x86.avx512.mask.fpclass.pd.128"] - fn vfpclasspd_128(a: f64x2, imm8: i32, k: __mmask8) -> __mmask8; - #[link_name = "llvm.x86.avx512.mask.fpclass.pd.256"] - fn vfpclasspd_256(a: f64x4, imm8: i32, k: __mmask8) -> __mmask8; - #[link_name = "llvm.x86.avx512.mask.fpclass.pd.512"] - fn vfpclasspd_512(a: f64x8, imm8: i32, k: __mmask8) -> __mmask8; - - #[link_name = "llvm.x86.avx512.mask.fpclass.ps.128"] - fn vfpclassps_128(a: f32x4, imm8: i32, k: __mmask8) -> __mmask8; - #[link_name = "llvm.x86.avx512.mask.fpclass.ps.256"] - fn vfpclassps_256(a: f32x8, imm8: i32, k: __mmask8) -> __mmask8; - #[link_name = "llvm.x86.avx512.mask.fpclass.ps.512"] - fn vfpclassps_512(a: f32x16, imm8: i32, k: __mmask16) -> __mmask16; + #[link_name = "llvm.x86.avx512.fpclass.pd.128"] + fn vfpclasspd_128(a: f64x2, imm8: i32) -> __mmask8; + #[link_name = "llvm.x86.avx512.fpclass.pd.256"] + fn vfpclasspd_256(a: f64x4, imm8: i32) -> __mmask8; + #[link_name = "llvm.x86.avx512.fpclass.pd.512"] + fn vfpclasspd_512(a: f64x8, imm8: i32) -> __mmask8; + + #[link_name = "llvm.x86.avx512.fpclass.ps.128"] + fn vfpclassps_128(a: f32x4, imm8: i32) -> __mmask8; + #[link_name = "llvm.x86.avx512.fpclass.ps.256"] + fn vfpclassps_256(a: f32x8, imm8: i32) -> __mmask8; + #[link_name = "llvm.x86.avx512.fpclass.ps.512"] + fn vfpclassps_512(a: f32x16, imm8: i32) -> __mmask16; #[link_name = "llvm.x86.avx512.mask.fpclass.sd"] fn vfpclasssd(a: f64x2, imm8: i32, k: __mmask8) -> __mmask8; diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs index 
0c725402a9176..2c5002a2d000d 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs @@ -16242,7 +16242,7 @@ pub fn _mm512_maskz_cvttps_epi32(k: __mmask16, a: __m512) -> __m512i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub fn _mm256_mask_cvttps_epi32(src: __m256i, k: __mmask8, a: __m256) -> __m256i { - unsafe { transmute(vcvttps2dq256(a.as_f32x8(), src.as_i32x8(), k)) } + unsafe { simd_select_bitmask(k, _mm256_cvttps_epi32(a).as_i32x8(), src.as_i32x8()).as_m256i() } } /// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). @@ -16253,7 +16253,7 @@ pub fn _mm256_mask_cvttps_epi32(src: __m256i, k: __mmask8, a: __m256) -> __m256i #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub fn _mm256_maskz_cvttps_epi32(k: __mmask8, a: __m256) -> __m256i { - unsafe { transmute(vcvttps2dq256(a.as_f32x8(), i32x8::ZERO, k)) } + _mm256_mask_cvttps_epi32(_mm256_setzero_si256(), k, a) } /// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). 
@@ -16264,7 +16264,7 @@ pub fn _mm256_maskz_cvttps_epi32(k: __mmask8, a: __m256) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub fn _mm_mask_cvttps_epi32(src: __m128i, k: __mmask8, a: __m128) -> __m128i { - unsafe { transmute(vcvttps2dq128(a.as_f32x4(), src.as_i32x4(), k)) } + unsafe { simd_select_bitmask(k, _mm_cvttps_epi32(a).as_i32x4(), src.as_i32x4()).as_m128i() } } /// Convert packed single-precision (32-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). @@ -16275,7 +16275,7 @@ pub fn _mm_mask_cvttps_epi32(src: __m128i, k: __mmask8, a: __m128) -> __m128i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttps2dq))] pub fn _mm_maskz_cvttps_epi32(k: __mmask8, a: __m128) -> __m128i { - unsafe { transmute(vcvttps2dq128(a.as_f32x4(), i32x4::ZERO, k)) } + _mm_mask_cvttps_epi32(_mm_setzero_si128(), k, a) } /// Convert packed single-precision (32-bit) floating-point elements in a to packed unsigned 32-bit integers with truncation, and store the results in dst. @@ -16478,7 +16478,7 @@ pub fn _mm512_maskz_cvttpd_epi32(k: __mmask8, a: __m512d) -> __m256i { #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttpd2dq))] pub fn _mm256_mask_cvttpd_epi32(src: __m128i, k: __mmask8, a: __m256d) -> __m128i { - unsafe { transmute(vcvttpd2dq256(a.as_f64x4(), src.as_i32x4(), k)) } + unsafe { simd_select_bitmask(k, _mm256_cvttpd_epi32(a).as_i32x4(), src.as_i32x4()).as_m128i() } } /// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using zeromask k (elements are zeroed out when the corresponding mask bit is not set). 
@@ -16489,7 +16489,7 @@ pub fn _mm256_mask_cvttpd_epi32(src: __m128i, k: __mmask8, a: __m256d) -> __m128 #[stable(feature = "stdarch_x86_avx512", since = "1.89")] #[cfg_attr(test, assert_instr(vcvttpd2dq))] pub fn _mm256_maskz_cvttpd_epi32(k: __mmask8, a: __m256d) -> __m128i { - unsafe { transmute(vcvttpd2dq256(a.as_f64x4(), i32x4::ZERO, k)) } + _mm256_mask_cvttpd_epi32(_mm_setzero_si128(), k, a) } /// Convert packed double-precision (64-bit) floating-point elements in a to packed 32-bit integers with truncation, and store the results in dst using writemask k (elements are copied from src when the corresponding mask bit is not set). @@ -44430,10 +44430,6 @@ unsafe extern "C" { #[link_name = "llvm.x86.avx512.mask.cvttps2dq.512"] fn vcvttps2dq(a: f32x16, src: i32x16, mask: u16, rounding: i32) -> i32x16; - #[link_name = "llvm.x86.avx512.mask.cvttps2dq.256"] - fn vcvttps2dq256(a: f32x8, src: i32x8, mask: u8) -> i32x8; - #[link_name = "llvm.x86.avx512.mask.cvttps2dq.128"] - fn vcvttps2dq128(a: f32x4, src: i32x4, mask: u8) -> i32x4; #[link_name = "llvm.x86.avx512.mask.cvttps2udq.512"] fn vcvttps2udq(a: f32x16, src: u32x16, mask: u16, rounding: i32) -> u32x16; @@ -44444,8 +44440,6 @@ unsafe extern "C" { #[link_name = "llvm.x86.avx512.mask.cvttpd2dq.512"] fn vcvttpd2dq(a: f64x8, src: i32x8, mask: u8, rounding: i32) -> i32x8; - #[link_name = "llvm.x86.avx512.mask.cvttpd2dq.256"] - fn vcvttpd2dq256(a: f64x4, src: i32x4, mask: u8) -> i32x4; #[link_name = "llvm.x86.avx512.mask.cvttpd2dq.128"] fn vcvttpd2dq128(a: f64x2, src: i32x4, mask: u8) -> i32x4; diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml index 383e50b7cc70c..29dd3a095d7e6 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/sve/aarch64.spec.yml @@ -562,7 +562,7 @@ intrinsics: assert_instr: [] compose: - LLVMLink: - name: 
llvm.experimental.vector.insert.{sve_type}.{neon_type} + name: llvm.vector.insert.{sve_type}.{neon_type} arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] - Let: - op @@ -657,7 +657,7 @@ intrinsics: assert_instr: [] compose: - LLVMLink: - name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + name: llvm.vector.insert.{sve_type}.{neon_type} arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] - Let: - op @@ -713,7 +713,7 @@ intrinsics: assert_instr: [] compose: - LLVMLink: - name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + name: llvm.vector.insert.{sve_type}.{neon_type} arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] - Let: - op @@ -755,7 +755,7 @@ intrinsics: assert_instr: [] compose: - LLVMLink: - name: llvm.experimental.vector.insert.{sve_type}.{neon_type} + name: llvm.vector.insert.{sve_type}.{neon_type} arguments: ["op0: {sve_type}", "op1: {neon_type}", "idx: i64"] - Let: - op @@ -1210,7 +1210,7 @@ intrinsics: types: [f32, f64, i8, i16, i32, i64, u8, u16, u32, u64] assert_instr: [rev] compose: - - LLVMLink: { name: "rev.{sve_type}" } + - LLVMLink: { name: "llvm.vector.reverse.{sve_type}" } - name: svrev_{type} attr: [*sve-unstable] @@ -1220,7 +1220,7 @@ intrinsics: types: [b8, b16, b32, b64] assert_instr: [rev] compose: - - LLVMLink: { name: "rev.{sve_type}" } + - LLVMLink: { name: "llvm.vector.reverse.{sve_type}" } - name: svrevb[_{type}]{_mxz} attr: [*sve-unstable] From d3cdaeeb8a0b926ac059ff1a7cce1d94d55da3dc Mon Sep 17 00:00:00 2001 From: sayantn Date: Tue, 14 Apr 2026 04:38:51 +0530 Subject: [PATCH 13/30] Implement AVX512-VP2INTERSECT intrinsics --- .../stdarch/crates/core_arch/missing-x86.md | 16 -- .../core_arch/src/x86/avx512vp2intersect.rs | 244 ++++++++++++++++++ .../stdarch/crates/core_arch/src/x86/mod.rs | 4 + 3 files changed, 248 insertions(+), 16 deletions(-) create mode 100644 library/stdarch/crates/core_arch/src/x86/avx512vp2intersect.rs diff --git 
a/library/stdarch/crates/core_arch/missing-x86.md b/library/stdarch/crates/core_arch/missing-x86.md index 640ec7d0fe7d1..e9f68eb9e6abe 100644 --- a/library/stdarch/crates/core_arch/missing-x86.md +++ b/library/stdarch/crates/core_arch/missing-x86.md @@ -44,22 +44,6 @@

-
["AVX512_VP2INTERSECT", "AVX512F"]

- - * [ ] [`_mm512_2intersect_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_2intersect_epi32) - * [ ] [`_mm512_2intersect_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_2intersect_epi64) -

- - -
["AVX512_VP2INTERSECT", "AVX512VL"]

- - * [ ] [`_mm256_2intersect_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_2intersect_epi32) - * [ ] [`_mm256_2intersect_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_2intersect_epi64) - * [ ] [`_mm_2intersect_epi32`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_2intersect_epi32) - * [ ] [`_mm_2intersect_epi64`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_2intersect_epi64) -

- -
["CET_SS"]

* [ ] [`_clrssbsy`](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_clrssbsy) diff --git a/library/stdarch/crates/core_arch/src/x86/avx512vp2intersect.rs b/library/stdarch/crates/core_arch/src/x86/avx512vp2intersect.rs new file mode 100644 index 0000000000000..4dd7412e9e330 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/x86/avx512vp2intersect.rs @@ -0,0 +1,244 @@ +//! Vector Pair Intersection to a Pair of Mask Registers (VP2INTERSECT) + +use crate::core_arch::{simd::*, x86::*}; + +#[cfg(test)] +use stdarch_test::assert_instr; + +/// Compute intersection of packed 32-bit integer vectors a and b, +/// and store indication of match in the corresponding bit of two mask registers +/// specified by k1 and k2. A match in corresponding elements of a and b is +/// indicated by a set bit in the corresponding bit of the mask registers. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_2intersect_epi32&expand=0) +#[inline] +#[target_feature(enable = "avx512vp2intersect,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512vp2intersect", issue = "111137")] +#[cfg_attr(test, assert_instr(vp2intersectd))] +pub unsafe fn _mm_2intersect_epi32(a: __m128i, b: __m128i, k1: *mut __mmask8, k2: *mut __mmask8) { + (*k1, *k2) = vp2intersectd_128(a.as_i32x4(), b.as_i32x4()); +} + +/// Compute intersection of packed 64-bit integer vectors a and b, +/// and store indication of match in the corresponding bit of two mask registers +/// specified by k1 and k2. A match in corresponding elements of a and b is +/// indicated by a set bit in the corresponding bit of the mask registers. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm_2intersect_epi64&expand=0) +#[inline] +#[target_feature(enable = "avx512vp2intersect,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512vp2intersect", issue = "111137")] +#[cfg_attr(test, assert_instr(vp2intersectq))] +pub unsafe fn _mm_2intersect_epi64(a: __m128i, b: __m128i, k1: *mut __mmask8, k2: *mut __mmask8) { + (*k1, *k2) = vp2intersectq_128(a.as_i64x2(), b.as_i64x2()); +} + +/// Compute intersection of packed 32-bit integer vectors a and b, +/// and store indication of match in the corresponding bit of two mask registers +/// specified by k1 and k2. A match in corresponding elements of a and b is +/// indicated by a set bit in the corresponding bit of the mask registers. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_2intersect_epi32&expand=0) +#[inline] +#[target_feature(enable = "avx512vp2intersect,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512vp2intersect", issue = "111137")] +#[cfg_attr(test, assert_instr(vp2intersectd))] +pub unsafe fn _mm256_2intersect_epi32( + a: __m256i, + b: __m256i, + k1: *mut __mmask8, + k2: *mut __mmask8, +) { + (*k1, *k2) = vp2intersectd_256(a.as_i32x8(), b.as_i32x8()); +} + +/// Compute intersection of packed 64-bit integer vectors a and b, +/// and store indication of match in the corresponding bit of two mask registers +/// specified by k1 and k2. A match in corresponding elements of a and b is +/// indicated by a set bit in the corresponding bit of the mask registers. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm256_2intersect_epi64&expand=0) +#[inline] +#[target_feature(enable = "avx512vp2intersect,avx512vl")] +#[unstable(feature = "stdarch_x86_avx512vp2intersect", issue = "111137")] +#[cfg_attr(test, assert_instr(vp2intersectq))] +pub unsafe fn _mm256_2intersect_epi64( + a: __m256i, + b: __m256i, + k1: *mut __mmask8, + k2: *mut __mmask8, +) { + (*k1, *k2) = vp2intersectq_256(a.as_i64x4(), b.as_i64x4()); +} + +/// Compute intersection of packed 32-bit integer vectors a and b, +/// and store indication of match in the corresponding bit of two mask registers +/// specified by k1 and k2. A match in corresponding elements of a and b is +/// indicated by a set bit in the corresponding bit of the mask registers. +/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_2intersect_epi32&expand=0) +#[inline] +#[target_feature(enable = "avx512vp2intersect,avx512f")] +#[unstable(feature = "stdarch_x86_avx512vp2intersect", issue = "111137")] +#[cfg_attr(test, assert_instr(vp2intersectd))] +pub unsafe fn _mm512_2intersect_epi32( + a: __m512i, + b: __m512i, + k1: *mut __mmask16, + k2: *mut __mmask16, +) { + (*k1, *k2) = vp2intersectd_512(a.as_i32x16(), b.as_i32x16()); +} + +/// Compute intersection of packed 64-bit integer vectors a and b, +/// and store indication of match in the corresponding bit of two mask registers +/// specified by k1 and k2. A match in corresponding elements of a and b is +/// indicated by a set bit in the corresponding bit of the mask registers. 
+/// +/// [Intel's documentation](https://software.intel.com/sites/landingpage/IntrinsicsGuide/#text=_mm512_2intersect_epi64&expand=0) +#[inline] +#[target_feature(enable = "avx512vp2intersect,avx512f")] +#[unstable(feature = "stdarch_x86_avx512vp2intersect", issue = "111137")] +#[cfg_attr(test, assert_instr(vp2intersectq))] +pub unsafe fn _mm512_2intersect_epi64( + a: __m512i, + b: __m512i, + k1: *mut __mmask8, + k2: *mut __mmask8, +) { + (*k1, *k2) = vp2intersectq_512(a.as_i64x8(), b.as_i64x8()); +} + +#[allow(improper_ctypes)] +unsafe extern "C" { + #[link_name = "llvm.x86.avx512.vp2intersect.d.128"] + fn vp2intersectd_128(a: i32x4, b: i32x4) -> (u8, u8); + #[link_name = "llvm.x86.avx512.vp2intersect.q.128"] + fn vp2intersectq_128(a: i64x2, b: i64x2) -> (u8, u8); + + #[link_name = "llvm.x86.avx512.vp2intersect.d.256"] + fn vp2intersectd_256(a: i32x8, b: i32x8) -> (u8, u8); + #[link_name = "llvm.x86.avx512.vp2intersect.q.256"] + fn vp2intersectq_256(a: i64x4, b: i64x4) -> (u8, u8); + + #[link_name = "llvm.x86.avx512.vp2intersect.d.512"] + fn vp2intersectd_512(a: i32x16, b: i32x16) -> (u16, u16); + #[link_name = "llvm.x86.avx512.vp2intersect.q.512"] + fn vp2intersectq_512(a: i64x8, b: i64x8) -> (u8, u8); +} + +#[cfg(test)] +mod tests { + use crate::core_arch::x86::*; + use stdarch_test::simd_test; + + #[simd_test(enable = "avx512vp2intersect,avx512vl")] + unsafe fn test_mm_2intersect_epi32() { + let mut k1 = 0; + let mut k2 = 0; + + let a = _mm_set_epi32(1, 2, 3, 4); + let b = _mm_set_epi32(3, 4, 5, 6); + _mm_2intersect_epi32(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b0011); + assert_eq!(k2, 0b1100); + + let a = _mm_set_epi32(1, 2, 3, 4); + let b = _mm_set_epi32(2, 3, 4, 5); + _mm_2intersect_epi32(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b0111); + assert_eq!(k2, 0b1110); + } + + #[simd_test(enable = "avx512vp2intersect,avx512vl")] + unsafe fn test_mm_2intersect_epi64() { + let mut k1 = 0; + let mut k2 = 0; + + let a = _mm_set_epi64x(1, 2); + let b = 
_mm_set_epi64x(2, 3); + _mm_2intersect_epi64(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b01); + assert_eq!(k2, 0b10); + + let a = _mm_set_epi64x(1, 2); + let b = _mm_set_epi64x(2, 2); + _mm_2intersect_epi64(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b01); + assert_eq!(k2, 0b11); + } + + #[simd_test(enable = "avx512vp2intersect,avx512vl")] + unsafe fn test_mm256_2intersect_epi32() { + let mut k1 = 0; + let mut k2 = 0; + + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let b = _mm256_set_epi32(5, 6, 7, 8, 9, 10, 11, 12); + _mm256_2intersect_epi32(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b00001111); + assert_eq!(k2, 0b11110000); + + let a = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8); + let b = _mm256_set_epi32(2, 3, 4, 5, 6, 7, 8, 9); + _mm256_2intersect_epi32(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b01111111); + assert_eq!(k2, 0b11111110); + } + + #[simd_test(enable = "avx512vp2intersect,avx512vl")] + unsafe fn test_mm256_2intersect_epi64() { + let mut k1 = 0; + let mut k2 = 0; + + let a = _mm256_set_epi64x(1, 2, 3, 4); + let b = _mm256_set_epi64x(3, 4, 5, 6); + _mm256_2intersect_epi64(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b0011); + assert_eq!(k2, 0b1100); + + let a = _mm256_set_epi64x(1, 2, 3, 4); + let b = _mm256_set_epi64x(2, 3, 4, 5); + _mm256_2intersect_epi64(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b0111); + assert_eq!(k2, 0b1110); + } + + #[simd_test(enable = "avx512vp2intersect,avx512f")] + unsafe fn test_mm512_2intersect_epi32() { + let mut k1 = 0; + let mut k2 = 0; + + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let b = _mm512_set_epi32( + 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + ); + _mm512_2intersect_epi32(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b0000000011111111); + assert_eq!(k2, 0b1111111100000000); + + let a = _mm512_set_epi32(1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16); + let b = _mm512_set_epi32(2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17); + 
_mm512_2intersect_epi32(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b0111111111111111); + assert_eq!(k2, 0b1111111111111110); + } + + #[simd_test(enable = "avx512vp2intersect,avx512f")] + unsafe fn test_mm512_2intersect_epi64() { + let mut k1 = 0; + let mut k2 = 0; + + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let b = _mm512_set_epi64(5, 6, 7, 8, 9, 10, 11, 12); + _mm512_2intersect_epi64(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b00001111); + assert_eq!(k2, 0b11110000); + + let a = _mm512_set_epi64(1, 2, 3, 4, 5, 6, 7, 8); + let b = _mm512_set_epi64(2, 3, 4, 5, 6, 7, 8, 9); + _mm512_2intersect_epi64(a, b, &mut k1, &mut k2); + assert_eq!(k1, 0b01111111); + assert_eq!(k2, 0b11111110); + } +} diff --git a/library/stdarch/crates/core_arch/src/x86/mod.rs b/library/stdarch/crates/core_arch/src/x86/mod.rs index 68a963f65b7d4..f5a8acbd8fac4 100644 --- a/library/stdarch/crates/core_arch/src/x86/mod.rs +++ b/library/stdarch/crates/core_arch/src/x86/mod.rs @@ -778,3 +778,7 @@ pub use self::kl::*; mod movrs; #[unstable(feature = "movrs_target_feature", issue = "137976")] pub use self::movrs::*; + +mod avx512vp2intersect; +#[unstable(feature = "stdarch_x86_avx512vp2intersect", issue = "111137")] +pub use self::avx512vp2intersect::*; From 2c9fc725eacb343ddafe448d4ca547e9989569d8 Mon Sep 17 00:00:00 2001 From: sayantn Date: Sun, 19 Apr 2026 22:53:43 +0530 Subject: [PATCH 14/30] Add AMX-AVX512 BF16 intrinsics --- .../crates/core_arch/src/x86_64/amx.rs | 178 ++++++++++++++++++ 1 file changed, 178 insertions(+) diff --git a/library/stdarch/crates/core_arch/src/x86_64/amx.rs b/library/stdarch/crates/core_arch/src/x86_64/amx.rs index 03bbe3e449258..62e46097e6208 100644 --- a/library/stdarch/crates/core_arch/src/x86_64/amx.rs +++ b/library/stdarch/crates/core_arch/src/x86_64/amx.rs @@ -480,6 +480,72 @@ pub unsafe fn _tile_cvtrowps2phli() -> __m512h tcvtrowps2phli(TILE as i8, ROW as u32).as_m512h() } +/// Moves a row from a tile register to a zmm register, converting the 
packed single-precision (32-bit) +/// floating-point elements to packed BF16 (16-bit) floating-point elements. The resulting +/// 16-bit elements are placed in the high 16-bits within each 32-bit element of the returned vector. +#[inline] +#[rustc_legacy_const_generics(0)] +#[target_feature(enable = "amx-avx512,avx10.2")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(tcvtrowps2bf16h, TILE = 0) +)] +#[unstable(feature = "x86_amx_intrinsics", issue = "126622")] +pub unsafe fn _tile_cvtrowps2bf16h(row: u32) -> __m512bh { + static_assert_uimm_bits!(TILE, 3); + tcvtrowps2bf16h(TILE as i8, row).as_m512bh() +} + +/// Moves a row from a tile register to a zmm register, converting the packed single-precision (32-bit) +/// floating-point elements to packed BF16 (16-bit) floating-point elements. The resulting +/// 16-bit elements are placed in the high 16-bits within each 32-bit element of the returned vector. +#[inline] +#[rustc_legacy_const_generics(0, 1)] +#[target_feature(enable = "amx-avx512,avx10.2")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(tcvtrowps2bf16h, TILE = 0, ROW = 0) +)] +#[unstable(feature = "x86_amx_intrinsics", issue = "126622")] +pub unsafe fn _tile_cvtrowps2bf16hi() -> __m512bh { + static_assert_uimm_bits!(TILE, 3); + static_assert_uimm_bits!(ROW, 6); + tcvtrowps2bf16hi(TILE as i8, ROW as u32).as_m512bh() +} + +/// Moves a row from a tile register to a zmm register, converting the packed single-precision (32-bit) +/// floating-point elements to packed BF16 (16-bit) floating-point elements. The resulting +/// 16-bit elements are placed in the low 16-bits within each 32-bit element of the returned vector. 
+#[inline] +#[rustc_legacy_const_generics(0)] +#[target_feature(enable = "amx-avx512,avx10.2")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(tcvtrowps2bf16l, TILE = 0) +)] +#[unstable(feature = "x86_amx_intrinsics", issue = "126622")] +pub unsafe fn _tile_cvtrowps2bf16l(row: u32) -> __m512bh { + static_assert_uimm_bits!(TILE, 3); + tcvtrowps2bf16l(TILE as i8, row).as_m512bh() +} + +/// Moves a row from a tile register to a zmm register, converting the packed single-precision (32-bit) +/// floating-point elements to packed BF16 (16-bit) floating-point elements. The resulting +/// 16-bit elements are placed in the low 16-bits within each 32-bit element of the returned vector. +#[inline] +#[rustc_legacy_const_generics(0, 1)] +#[target_feature(enable = "amx-avx512,avx10.2")] +#[cfg_attr( + all(test, any(target_os = "linux", target_env = "msvc")), + assert_instr(tcvtrowps2bf16l, TILE = 0, ROW = 0) +)] +#[unstable(feature = "x86_amx_intrinsics", issue = "126622")] +pub unsafe fn _tile_cvtrowps2bf16li() -> __m512bh { + static_assert_uimm_bits!(TILE, 3); + static_assert_uimm_bits!(ROW, 6); + tcvtrowps2bf16li(TILE as i8, ROW as u32).as_m512bh() +} + /// Moves one row of tile data into a zmm vector register #[inline] #[rustc_legacy_const_generics(0)] @@ -567,6 +633,14 @@ unsafe extern "C" { fn tcvtrowps2phl(tile: i8, row: u32) -> f16x32; #[link_name = "llvm.x86.tcvtrowps2phli"] fn tcvtrowps2phli(tile: i8, row: u32) -> f16x32; + #[link_name = "llvm.x86.tcvtrowps2bf16h"] + fn tcvtrowps2bf16h(tile: i8, row: u32) -> u16x32; + #[link_name = "llvm.x86.tcvtrowps2bf16hi"] + fn tcvtrowps2bf16hi(tile: i8, row: u32) -> u16x32; + #[link_name = "llvm.x86.tcvtrowps2bf16l"] + fn tcvtrowps2bf16l(tile: i8, row: u32) -> u16x32; + #[link_name = "llvm.x86.tcvtrowps2bf16li"] + fn tcvtrowps2bf16li(tile: i8, row: u32) -> u16x32; #[link_name = "llvm.x86.tilemovrow"] fn tilemovrow(tile: i8, row: u32) -> i32x16; #[link_name = "llvm.x86.tilemovrowi"] @@ 
-1276,6 +1350,110 @@ mod tests { } } + #[simd_test(enable = "amx-avx512,avx10.2")] + fn test_tile_cvtrowps2bf16h() { + unsafe { + _init_amx(); + let array: [[f32; 16]; 16] = array::from_fn(|i| [i as _; _]); + + let mut config = __tilecfg::default(); + config.palette = 1; + config.colsb[0] = 64; + config.rows[0] = 16; + _tile_loadconfig(config.as_ptr()); + _tile_loadd::<0>(array.as_ptr().cast(), 64); + for i in 0..16 { + let row = _tile_cvtrowps2bf16h::<0>(i); + assert_eq!( + *row.as_u16x32().as_array(), + array::from_fn(|j| if j & 1 == 0 { + 0 + } else { + _mm_cvtness_sbh(i as _).to_bits() + }) + ); + } + } + } + + #[simd_test(enable = "amx-avx512,avx10.2")] + fn test_tile_cvtrowps2bf16hi() { + unsafe { + _init_amx(); + let array: [[f32; 16]; 16] = array::from_fn(|i| [i as _; _]); + + let mut config = __tilecfg::default(); + config.palette = 1; + config.colsb[0] = 64; + config.rows[0] = 16; + _tile_loadconfig(config.as_ptr()); + _tile_loadd::<0>(array.as_ptr().cast(), 64); + for i in 0..16 { + let row = wrap_imm4!(_tile_cvtrowps2bf16hi::<0>, i); + assert_eq!( + *row.as_u16x32().as_array(), + array::from_fn(|j| if j & 1 == 0 { + 0 + } else { + _mm_cvtness_sbh(i as _).to_bits() + }) + ); + } + } + } + + #[simd_test(enable = "amx-avx512,avx10.2")] + fn test_tile_cvtrowps2bf16l() { + unsafe { + _init_amx(); + let array: [[f32; 16]; 16] = array::from_fn(|i| [i as _; _]); + + let mut config = __tilecfg::default(); + config.palette = 1; + config.colsb[0] = 64; + config.rows[0] = 16; + _tile_loadconfig(config.as_ptr()); + _tile_loadd::<0>(array.as_ptr().cast(), 64); + for i in 0..16 { + let row = _tile_cvtrowps2bf16l::<0>(i); + assert_eq!( + *row.as_u16x32().as_array(), + array::from_fn(|j| if j & 1 == 0 { + _mm_cvtness_sbh(i as _).to_bits() + } else { + 0 + }) + ); + } + } + } + + #[simd_test(enable = "amx-avx512,avx10.2")] + fn test_tile_cvtrowps2bf16li() { + unsafe { + _init_amx(); + let array: [[f32; 16]; 16] = array::from_fn(|i| [i as _; _]); + + let mut config = 
__tilecfg::default(); + config.palette = 1; + config.colsb[0] = 64; + config.rows[0] = 16; + _tile_loadconfig(config.as_ptr()); + _tile_loadd::<0>(array.as_ptr().cast(), 64); + for i in 0..16 { + let row = wrap_imm4!(_tile_cvtrowps2bf16li::<0>, i); + assert_eq!( + *row.as_u16x32().as_array(), + array::from_fn(|j| if j & 1 == 0 { + _mm_cvtness_sbh(i as _).to_bits() + } else { + 0 + }) + ); + } + } + } + #[simd_test(enable = "amx-tf32")] fn test_tile_mmultf32ps() { unsafe { From c1a63b477626546be06a025ef8af6dab9549a9ed Mon Sep 17 00:00:00 2001 From: The rustc-josh-sync Cronjob Bot Date: Mon, 20 Apr 2026 05:09:37 +0000 Subject: [PATCH 15/30] Prepare for merging from rust-lang/rust This updates the rust-version file to e22c616e4e87914135c1db261a03e0437255335e. --- library/stdarch/rust-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/stdarch/rust-version b/library/stdarch/rust-version index a8efb5c477c1f..e9fc6c4cd023e 100644 --- a/library/stdarch/rust-version +++ b/library/stdarch/rust-version @@ -1 +1 @@ -e4fdb554ad2c0270473181438e338c42b5b30b0c +e22c616e4e87914135c1db261a03e0437255335e From 476ff35feb1c61de2049eea40d0be30dcfe10684 Mon Sep 17 00:00:00 2001 From: sayantn Date: Tue, 21 Apr 2026 16:49:09 +0530 Subject: [PATCH 16/30] enable AMX instruction tests in windows-gnu --- .../crates/core_arch/src/x86_64/amx.rs | 38 +++++++++---------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86_64/amx.rs b/library/stdarch/crates/core_arch/src/x86_64/amx.rs index 62e46097e6208..b3b3e86750ef2 100644 --- a/library/stdarch/crates/core_arch/src/x86_64/amx.rs +++ b/library/stdarch/crates/core_arch/src/x86_64/amx.rs @@ -252,7 +252,7 @@ pub unsafe fn _tile_cmmrlfp16ps() { #[rustc_legacy_const_generics(0, 1, 2)] #[target_feature(enable = "amx-fp8")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tdpbf8ps, DST = 0, A = 
1, B = 2) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -271,7 +271,7 @@ pub unsafe fn _tile_dpbf8ps() { #[rustc_legacy_const_generics(0, 1, 2)] #[target_feature(enable = "amx-fp8")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tdpbhf8ps, DST = 0, A = 1, B = 2) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -290,7 +290,7 @@ pub unsafe fn _tile_dpbhf8ps() { #[rustc_legacy_const_generics(0, 1, 2)] #[target_feature(enable = "amx-fp8")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tdphbf8ps, DST = 0, A = 1, B = 2) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -309,7 +309,7 @@ pub unsafe fn _tile_dphbf8ps() { #[rustc_legacy_const_generics(0, 1, 2)] #[target_feature(enable = "amx-fp8")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tdphf8ps, DST = 0, A = 1, B = 2) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -329,7 +329,7 @@ pub unsafe fn _tile_dphf8ps() { #[rustc_legacy_const_generics(0)] #[target_feature(enable = "amx-movrs")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tileloaddrs, DST = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -349,7 +349,7 @@ pub unsafe fn _tile_loaddrs(base: *const u8, stride: usize) { #[rustc_legacy_const_generics(0)] #[target_feature(enable = "amx-movrs")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tileloaddrst1, DST = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -372,7 +372,7 @@ pub unsafe fn _tile_stream_loaddrs(base: *const u8, stride: usiz #[rustc_legacy_const_generics(0, 1, 2)] 
#[target_feature(enable = "amx-tf32")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tmmultf32ps, DST = 0, A = 1, B = 2) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -389,7 +389,7 @@ pub unsafe fn _tile_mmultf32ps() { #[rustc_legacy_const_generics(0)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tcvtrowd2ps, TILE = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -404,7 +404,7 @@ pub unsafe fn _tile_cvtrowd2ps(row: u32) -> __m512 { #[rustc_legacy_const_generics(0, 1)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tcvtrowd2ps, TILE = 0, ROW = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -421,7 +421,7 @@ pub unsafe fn _tile_cvtrowd2psi() -> __m512 { #[rustc_legacy_const_generics(0)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tcvtrowps2phh, TILE = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -437,7 +437,7 @@ pub unsafe fn _tile_cvtrowps2phh(row: u32) -> __m512h { #[rustc_legacy_const_generics(0, 1)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tcvtrowps2phh, TILE = 0, ROW = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -454,7 +454,7 @@ pub unsafe fn _tile_cvtrowps2phhi() -> __m512h #[rustc_legacy_const_generics(0)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, 
not(target_vendor = "apple")), assert_instr(tcvtrowps2phl, TILE = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -470,7 +470,7 @@ pub unsafe fn _tile_cvtrowps2phl(row: u32) -> __m512h { #[rustc_legacy_const_generics(0, 1)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tcvtrowps2phl, TILE = 0, ROW = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -487,7 +487,7 @@ pub unsafe fn _tile_cvtrowps2phli() -> __m512h #[rustc_legacy_const_generics(0)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tcvtrowps2bf16h, TILE = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -503,7 +503,7 @@ pub unsafe fn _tile_cvtrowps2bf16h(row: u32) -> __m512bh { #[rustc_legacy_const_generics(0, 1)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tcvtrowps2bf16h, TILE = 0, ROW = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -520,7 +520,7 @@ pub unsafe fn _tile_cvtrowps2bf16hi() -> __m512 #[rustc_legacy_const_generics(0)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tcvtrowps2bf16l, TILE = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -536,7 +536,7 @@ pub unsafe fn _tile_cvtrowps2bf16l(row: u32) -> __m512bh { #[rustc_legacy_const_generics(0, 1)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tcvtrowps2bf16l, TILE = 0, ROW = 0) )] 
#[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -551,7 +551,7 @@ pub unsafe fn _tile_cvtrowps2bf16li() -> __m512 #[rustc_legacy_const_generics(0)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tilemovrow, TILE = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] @@ -565,7 +565,7 @@ pub unsafe fn _tile_movrow(row: u32) -> __m512i { #[rustc_legacy_const_generics(0, 1)] #[target_feature(enable = "amx-avx512,avx10.2")] #[cfg_attr( - all(test, any(target_os = "linux", target_env = "msvc")), + all(test, not(target_vendor = "apple")), assert_instr(tilemovrow, TILE = 0, ROW = 0) )] #[unstable(feature = "x86_amx_intrinsics", issue = "126622")] From 8276d419c4be9d6a93fdb81af61bc6b899a3c1a0 Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Thu, 9 Apr 2026 19:45:07 +0800 Subject: [PATCH 17/30] loongarch: Use `intrinsics::simd` for selected LSX/LASX intrinsics This change migrates a subset of LSX/LASX intrinsics to the portable `intrinsics::simd` interface. Only straightforward mappings are converted in this patch. Intrinsics that require more complex transformations or currently result in suboptimal code generation are intentionally left unchanged and will be migrated incrementally in follow-up patches. 
--- .../src/loongarch64/lasx/generated.rs | 2204 ++------------- .../core_arch/src/loongarch64/lasx/mod.rs | 7 + .../src/loongarch64/lasx/portable.rs | 201 ++ .../src/loongarch64/lsx/generated.rs | 2396 ++--------------- .../core_arch/src/loongarch64/lsx/mod.rs | 7 + .../core_arch/src/loongarch64/lsx/portable.rs | 207 ++ .../crates/core_arch/src/loongarch64/mod.rs | 1 + .../crates/core_arch/src/loongarch64/simd.rs | 302 +++ .../crates/stdarch-gen-loongarch/lasx.spec | 186 ++ .../crates/stdarch-gen-loongarch/lsx.spec | 192 ++ .../src/portable-intrinsics.txt | 379 +++ 11 files changed, 1951 insertions(+), 4131 deletions(-) create mode 100644 library/stdarch/crates/core_arch/src/loongarch64/lasx/portable.rs create mode 100644 library/stdarch/crates/core_arch/src/loongarch64/lsx/portable.rs create mode 100644 library/stdarch/crates/core_arch/src/loongarch64/simd.rs diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs index 5559c6ad4d0e8..de629914ab2c0 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs @@ -11,38 +11,6 @@ use super::super::*; #[allow(improper_ctypes)] unsafe extern "unadjusted" { - #[link_name = "llvm.loongarch.lasx.xvsll.b"] - fn __lasx_xvsll_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvsll.h"] - fn __lasx_xvsll_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvsll.w"] - fn __lasx_xvsll_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvsll.d"] - fn __lasx_xvsll_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvslli.b"] - fn __lasx_xvslli_b(a: __v32i8, b: u32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvslli.h"] - fn __lasx_xvslli_h(a: __v16i16, b: u32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvslli.w"] - fn __lasx_xvslli_w(a: 
__v8i32, b: u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvslli.d"] - fn __lasx_xvslli_d(a: __v4i64, b: u32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvsra.b"] - fn __lasx_xvsra_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvsra.h"] - fn __lasx_xvsra_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvsra.w"] - fn __lasx_xvsra_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvsra.d"] - fn __lasx_xvsra_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvsrai.b"] - fn __lasx_xvsrai_b(a: __v32i8, b: u32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvsrai.h"] - fn __lasx_xvsrai_h(a: __v16i16, b: u32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvsrai.w"] - fn __lasx_xvsrai_w(a: __v8i32, b: u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvsrai.d"] - fn __lasx_xvsrai_d(a: __v4i64, b: u32) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvsrar.b"] fn __lasx_xvsrar_b(a: __v32i8, b: __v32i8) -> __v32i8; #[link_name = "llvm.loongarch.lasx.xvsrar.h"] @@ -59,22 +27,6 @@ unsafe extern "unadjusted" { fn __lasx_xvsrari_w(a: __v8i32, b: u32) -> __v8i32; #[link_name = "llvm.loongarch.lasx.xvsrari.d"] fn __lasx_xvsrari_d(a: __v4i64, b: u32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvsrl.b"] - fn __lasx_xvsrl_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvsrl.h"] - fn __lasx_xvsrl_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvsrl.w"] - fn __lasx_xvsrl_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvsrl.d"] - fn __lasx_xvsrl_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvsrli.b"] - fn __lasx_xvsrli_b(a: __v32i8, b: u32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvsrli.h"] - fn __lasx_xvsrli_h(a: __v16i16, b: u32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvsrli.w"] - fn __lasx_xvsrli_w(a: __v8i32, b: 
u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvsrli.d"] - fn __lasx_xvsrli_d(a: __v4i64, b: u32) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvsrlr.b"] fn __lasx_xvsrlr_b(a: __v32i8, b: __v32i8) -> __v32i8; #[link_name = "llvm.loongarch.lasx.xvsrlr.h"] @@ -139,30 +91,6 @@ unsafe extern "unadjusted" { fn __lasx_xvbitrevi_w(a: __v8u32, b: u32) -> __v8u32; #[link_name = "llvm.loongarch.lasx.xvbitrevi.d"] fn __lasx_xvbitrevi_d(a: __v4u64, b: u32) -> __v4u64; - #[link_name = "llvm.loongarch.lasx.xvadd.b"] - fn __lasx_xvadd_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvadd.h"] - fn __lasx_xvadd_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvadd.w"] - fn __lasx_xvadd_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvadd.d"] - fn __lasx_xvadd_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvaddi.bu"] - fn __lasx_xvaddi_bu(a: __v32i8, b: u32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvaddi.hu"] - fn __lasx_xvaddi_hu(a: __v16i16, b: u32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvaddi.wu"] - fn __lasx_xvaddi_wu(a: __v8i32, b: u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvaddi.du"] - fn __lasx_xvaddi_du(a: __v4i64, b: u32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvsub.b"] - fn __lasx_xvsub_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvsub.h"] - fn __lasx_xvsub_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvsub.w"] - fn __lasx_xvsub_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvsub.d"] - fn __lasx_xvsub_d(a: __v4i64, b: __v4i64) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvsubi.bu"] fn __lasx_xvsubi_bu(a: __v32i8, b: u32) -> __v32i8; #[link_name = "llvm.loongarch.lasx.xvsubi.hu"] @@ -171,150 +99,6 @@ unsafe extern "unadjusted" { fn __lasx_xvsubi_wu(a: __v8i32, b: u32) -> __v8i32; #[link_name = 
"llvm.loongarch.lasx.xvsubi.du"] fn __lasx_xvsubi_du(a: __v4i64, b: u32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvmax.b"] - fn __lasx_xvmax_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvmax.h"] - fn __lasx_xvmax_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvmax.w"] - fn __lasx_xvmax_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvmax.d"] - fn __lasx_xvmax_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvmaxi.b"] - fn __lasx_xvmaxi_b(a: __v32i8, b: i32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvmaxi.h"] - fn __lasx_xvmaxi_h(a: __v16i16, b: i32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvmaxi.w"] - fn __lasx_xvmaxi_w(a: __v8i32, b: i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvmaxi.d"] - fn __lasx_xvmaxi_d(a: __v4i64, b: i32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvmax.bu"] - fn __lasx_xvmax_bu(a: __v32u8, b: __v32u8) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvmax.hu"] - fn __lasx_xvmax_hu(a: __v16u16, b: __v16u16) -> __v16u16; - #[link_name = "llvm.loongarch.lasx.xvmax.wu"] - fn __lasx_xvmax_wu(a: __v8u32, b: __v8u32) -> __v8u32; - #[link_name = "llvm.loongarch.lasx.xvmax.du"] - fn __lasx_xvmax_du(a: __v4u64, b: __v4u64) -> __v4u64; - #[link_name = "llvm.loongarch.lasx.xvmaxi.bu"] - fn __lasx_xvmaxi_bu(a: __v32u8, b: u32) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvmaxi.hu"] - fn __lasx_xvmaxi_hu(a: __v16u16, b: u32) -> __v16u16; - #[link_name = "llvm.loongarch.lasx.xvmaxi.wu"] - fn __lasx_xvmaxi_wu(a: __v8u32, b: u32) -> __v8u32; - #[link_name = "llvm.loongarch.lasx.xvmaxi.du"] - fn __lasx_xvmaxi_du(a: __v4u64, b: u32) -> __v4u64; - #[link_name = "llvm.loongarch.lasx.xvmin.b"] - fn __lasx_xvmin_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvmin.h"] - fn __lasx_xvmin_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvmin.w"] 
- fn __lasx_xvmin_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvmin.d"] - fn __lasx_xvmin_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvmini.b"] - fn __lasx_xvmini_b(a: __v32i8, b: i32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvmini.h"] - fn __lasx_xvmini_h(a: __v16i16, b: i32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvmini.w"] - fn __lasx_xvmini_w(a: __v8i32, b: i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvmini.d"] - fn __lasx_xvmini_d(a: __v4i64, b: i32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvmin.bu"] - fn __lasx_xvmin_bu(a: __v32u8, b: __v32u8) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvmin.hu"] - fn __lasx_xvmin_hu(a: __v16u16, b: __v16u16) -> __v16u16; - #[link_name = "llvm.loongarch.lasx.xvmin.wu"] - fn __lasx_xvmin_wu(a: __v8u32, b: __v8u32) -> __v8u32; - #[link_name = "llvm.loongarch.lasx.xvmin.du"] - fn __lasx_xvmin_du(a: __v4u64, b: __v4u64) -> __v4u64; - #[link_name = "llvm.loongarch.lasx.xvmini.bu"] - fn __lasx_xvmini_bu(a: __v32u8, b: u32) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvmini.hu"] - fn __lasx_xvmini_hu(a: __v16u16, b: u32) -> __v16u16; - #[link_name = "llvm.loongarch.lasx.xvmini.wu"] - fn __lasx_xvmini_wu(a: __v8u32, b: u32) -> __v8u32; - #[link_name = "llvm.loongarch.lasx.xvmini.du"] - fn __lasx_xvmini_du(a: __v4u64, b: u32) -> __v4u64; - #[link_name = "llvm.loongarch.lasx.xvseq.b"] - fn __lasx_xvseq_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvseq.h"] - fn __lasx_xvseq_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvseq.w"] - fn __lasx_xvseq_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvseq.d"] - fn __lasx_xvseq_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvseqi.b"] - fn __lasx_xvseqi_b(a: __v32i8, b: i32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvseqi.h"] - fn __lasx_xvseqi_h(a: __v16i16, 
b: i32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvseqi.w"] - fn __lasx_xvseqi_w(a: __v8i32, b: i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvseqi.d"] - fn __lasx_xvseqi_d(a: __v4i64, b: i32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvslt.b"] - fn __lasx_xvslt_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvslt.h"] - fn __lasx_xvslt_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvslt.w"] - fn __lasx_xvslt_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvslt.d"] - fn __lasx_xvslt_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvslti.b"] - fn __lasx_xvslti_b(a: __v32i8, b: i32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvslti.h"] - fn __lasx_xvslti_h(a: __v16i16, b: i32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvslti.w"] - fn __lasx_xvslti_w(a: __v8i32, b: i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvslti.d"] - fn __lasx_xvslti_d(a: __v4i64, b: i32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvslt.bu"] - fn __lasx_xvslt_bu(a: __v32u8, b: __v32u8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvslt.hu"] - fn __lasx_xvslt_hu(a: __v16u16, b: __v16u16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvslt.wu"] - fn __lasx_xvslt_wu(a: __v8u32, b: __v8u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvslt.du"] - fn __lasx_xvslt_du(a: __v4u64, b: __v4u64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvslti.bu"] - fn __lasx_xvslti_bu(a: __v32u8, b: u32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvslti.hu"] - fn __lasx_xvslti_hu(a: __v16u16, b: u32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvslti.wu"] - fn __lasx_xvslti_wu(a: __v8u32, b: u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvslti.du"] - fn __lasx_xvslti_du(a: __v4u64, b: u32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvsle.b"] - fn __lasx_xvsle_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = 
"llvm.loongarch.lasx.xvsle.h"] - fn __lasx_xvsle_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvsle.w"] - fn __lasx_xvsle_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvsle.d"] - fn __lasx_xvsle_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvslei.b"] - fn __lasx_xvslei_b(a: __v32i8, b: i32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvslei.h"] - fn __lasx_xvslei_h(a: __v16i16, b: i32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvslei.w"] - fn __lasx_xvslei_w(a: __v8i32, b: i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvslei.d"] - fn __lasx_xvslei_d(a: __v4i64, b: i32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvsle.bu"] - fn __lasx_xvsle_bu(a: __v32u8, b: __v32u8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvsle.hu"] - fn __lasx_xvsle_hu(a: __v16u16, b: __v16u16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvsle.wu"] - fn __lasx_xvsle_wu(a: __v8u32, b: __v8u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvsle.du"] - fn __lasx_xvsle_du(a: __v4u64, b: __v4u64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvslei.bu"] - fn __lasx_xvslei_bu(a: __v32u8, b: u32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvslei.hu"] - fn __lasx_xvslei_hu(a: __v16u16, b: u32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvslei.wu"] - fn __lasx_xvslei_wu(a: __v8u32, b: u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvslei.du"] - fn __lasx_xvslei_du(a: __v4u64, b: u32) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvsat.b"] fn __lasx_xvsat_b(a: __v32i8, b: u32) -> __v32i8; #[link_name = "llvm.loongarch.lasx.xvsat.h"] @@ -419,46 +203,6 @@ unsafe extern "unadjusted" { fn __lasx_xvabsd_wu(a: __v8u32, b: __v8u32) -> __v8u32; #[link_name = "llvm.loongarch.lasx.xvabsd.du"] fn __lasx_xvabsd_du(a: __v4u64, b: __v4u64) -> __v4u64; - #[link_name = "llvm.loongarch.lasx.xvmul.b"] - fn __lasx_xvmul_b(a: __v32i8, b: __v32i8) -> __v32i8; - 
#[link_name = "llvm.loongarch.lasx.xvmul.h"] - fn __lasx_xvmul_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvmul.w"] - fn __lasx_xvmul_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvmul.d"] - fn __lasx_xvmul_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvmadd.b"] - fn __lasx_xvmadd_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvmadd.h"] - fn __lasx_xvmadd_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvmadd.w"] - fn __lasx_xvmadd_w(a: __v8i32, b: __v8i32, c: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvmadd.d"] - fn __lasx_xvmadd_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvmsub.b"] - fn __lasx_xvmsub_b(a: __v32i8, b: __v32i8, c: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvmsub.h"] - fn __lasx_xvmsub_h(a: __v16i16, b: __v16i16, c: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvmsub.w"] - fn __lasx_xvmsub_w(a: __v8i32, b: __v8i32, c: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvmsub.d"] - fn __lasx_xvmsub_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvdiv.b"] - fn __lasx_xvdiv_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvdiv.h"] - fn __lasx_xvdiv_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvdiv.w"] - fn __lasx_xvdiv_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvdiv.d"] - fn __lasx_xvdiv_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvdiv.bu"] - fn __lasx_xvdiv_bu(a: __v32u8, b: __v32u8) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvdiv.hu"] - fn __lasx_xvdiv_hu(a: __v16u16, b: __v16u16) -> __v16u16; - #[link_name = "llvm.loongarch.lasx.xvdiv.wu"] - fn __lasx_xvdiv_wu(a: __v8u32, b: __v8u32) -> __v8u32; - 
#[link_name = "llvm.loongarch.lasx.xvdiv.du"] - fn __lasx_xvdiv_du(a: __v4u64, b: __v4u64) -> __v4u64; #[link_name = "llvm.loongarch.lasx.xvhaddw.h.b"] fn __lasx_xvhaddw_h_b(a: __v32i8, b: __v32i8) -> __v16i16; #[link_name = "llvm.loongarch.lasx.xvhaddw.w.h"] @@ -483,22 +227,6 @@ unsafe extern "unadjusted" { fn __lasx_xvhsubw_wu_hu(a: __v16u16, b: __v16u16) -> __v8i32; #[link_name = "llvm.loongarch.lasx.xvhsubw.du.wu"] fn __lasx_xvhsubw_du_wu(a: __v8u32, b: __v8u32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvmod.b"] - fn __lasx_xvmod_b(a: __v32i8, b: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvmod.h"] - fn __lasx_xvmod_h(a: __v16i16, b: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvmod.w"] - fn __lasx_xvmod_w(a: __v8i32, b: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvmod.d"] - fn __lasx_xvmod_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvmod.bu"] - fn __lasx_xvmod_bu(a: __v32u8, b: __v32u8) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvmod.hu"] - fn __lasx_xvmod_hu(a: __v16u16, b: __v16u16) -> __v16u16; - #[link_name = "llvm.loongarch.lasx.xvmod.wu"] - fn __lasx_xvmod_wu(a: __v8u32, b: __v8u32) -> __v8u32; - #[link_name = "llvm.loongarch.lasx.xvmod.du"] - fn __lasx_xvmod_du(a: __v4u64, b: __v4u64) -> __v4u64; #[link_name = "llvm.loongarch.lasx.xvrepl128vei.b"] fn __lasx_xvrepl128vei_b(a: __v32i8, b: u32) -> __v32i8; #[link_name = "llvm.loongarch.lasx.xvrepl128vei.h"] @@ -563,20 +291,12 @@ unsafe extern "unadjusted" { fn __lasx_xvshuf_w(a: __v8i32, b: __v8i32, c: __v8i32) -> __v8i32; #[link_name = "llvm.loongarch.lasx.xvshuf.d"] fn __lasx_xvshuf_d(a: __v4i64, b: __v4i64, c: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvand.v"] - fn __lasx_xvand_v(a: __v32u8, b: __v32u8) -> __v32u8; #[link_name = "llvm.loongarch.lasx.xvandi.b"] fn __lasx_xvandi_b(a: __v32u8, b: u32) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvor.v"] - fn __lasx_xvor_v(a: __v32u8, b: 
__v32u8) -> __v32u8; #[link_name = "llvm.loongarch.lasx.xvori.b"] fn __lasx_xvori_b(a: __v32u8, b: u32) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvnor.v"] - fn __lasx_xvnor_v(a: __v32u8, b: __v32u8) -> __v32u8; #[link_name = "llvm.loongarch.lasx.xvnori.b"] fn __lasx_xvnori_b(a: __v32u8, b: u32) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvxor.v"] - fn __lasx_xvxor_v(a: __v32u8, b: __v32u8) -> __v32u8; #[link_name = "llvm.loongarch.lasx.xvxori.b"] fn __lasx_xvxori_b(a: __v32u8, b: u32) -> __v32u8; #[link_name = "llvm.loongarch.lasx.xvbitsel.v"] @@ -589,22 +309,6 @@ unsafe extern "unadjusted" { fn __lasx_xvshuf4i_h(a: __v16i16, b: u32) -> __v16i16; #[link_name = "llvm.loongarch.lasx.xvshuf4i.w"] fn __lasx_xvshuf4i_w(a: __v8i32, b: u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.b"] - fn __lasx_xvreplgr2vr_b(a: i32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.h"] - fn __lasx_xvreplgr2vr_h(a: i32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.w"] - fn __lasx_xvreplgr2vr_w(a: i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvreplgr2vr.d"] - fn __lasx_xvreplgr2vr_d(a: i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvpcnt.b"] - fn __lasx_xvpcnt_b(a: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvpcnt.h"] - fn __lasx_xvpcnt_h(a: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvpcnt.w"] - fn __lasx_xvpcnt_w(a: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvpcnt.d"] - fn __lasx_xvpcnt_d(a: __v4i64) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvclo.b"] fn __lasx_xvclo_b(a: __v32i8) -> __v32i8; #[link_name = "llvm.loongarch.lasx.xvclo.h"] @@ -613,30 +317,6 @@ unsafe extern "unadjusted" { fn __lasx_xvclo_w(a: __v8i32) -> __v8i32; #[link_name = "llvm.loongarch.lasx.xvclo.d"] fn __lasx_xvclo_d(a: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvclz.b"] - fn __lasx_xvclz_b(a: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvclz.h"] - fn 
__lasx_xvclz_h(a: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvclz.w"] - fn __lasx_xvclz_w(a: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvclz.d"] - fn __lasx_xvclz_d(a: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvfadd.s"] - fn __lasx_xvfadd_s(a: __v8f32, b: __v8f32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvfadd.d"] - fn __lasx_xvfadd_d(a: __v4f64, b: __v4f64) -> __v4f64; - #[link_name = "llvm.loongarch.lasx.xvfsub.s"] - fn __lasx_xvfsub_s(a: __v8f32, b: __v8f32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvfsub.d"] - fn __lasx_xvfsub_d(a: __v4f64, b: __v4f64) -> __v4f64; - #[link_name = "llvm.loongarch.lasx.xvfmul.s"] - fn __lasx_xvfmul_s(a: __v8f32, b: __v8f32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvfmul.d"] - fn __lasx_xvfmul_d(a: __v4f64, b: __v4f64) -> __v4f64; - #[link_name = "llvm.loongarch.lasx.xvfdiv.s"] - fn __lasx_xvfdiv_s(a: __v8f32, b: __v8f32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvfdiv.d"] - fn __lasx_xvfdiv_d(a: __v4f64, b: __v4f64) -> __v4f64; #[link_name = "llvm.loongarch.lasx.xvfcvt.h.s"] fn __lasx_xvfcvt_h_s(a: __v8f32, b: __v8f32) -> __v16i16; #[link_name = "llvm.loongarch.lasx.xvfcvt.s.d"] @@ -661,10 +341,6 @@ unsafe extern "unadjusted" { fn __lasx_xvfclass_s(a: __v8f32) -> __v8i32; #[link_name = "llvm.loongarch.lasx.xvfclass.d"] fn __lasx_xvfclass_d(a: __v4f64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvfsqrt.s"] - fn __lasx_xvfsqrt_s(a: __v8f32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvfsqrt.d"] - fn __lasx_xvfsqrt_d(a: __v4f64) -> __v4f64; #[link_name = "llvm.loongarch.lasx.xvfrecip.s"] fn __lasx_xvfrecip_s(a: __v8f32) -> __v8f32; #[link_name = "llvm.loongarch.lasx.xvfrecip.d"] @@ -731,16 +407,6 @@ unsafe extern "unadjusted" { fn __lasx_xvreplve_d(a: __v4i64, b: i32) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvpermi.w"] fn __lasx_xvpermi_w(a: __v8i32, b: __v8i32, c: u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvandn.v"] - 
fn __lasx_xvandn_v(a: __v32u8, b: __v32u8) -> __v32u8; - #[link_name = "llvm.loongarch.lasx.xvneg.b"] - fn __lasx_xvneg_b(a: __v32i8) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvneg.h"] - fn __lasx_xvneg_h(a: __v16i16) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvneg.w"] - fn __lasx_xvneg_w(a: __v8i32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvneg.d"] - fn __lasx_xvneg_d(a: __v4i64) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvmuh.b"] fn __lasx_xvmuh_b(a: __v32i8, b: __v32i8) -> __v32i8; #[link_name = "llvm.loongarch.lasx.xvmuh.h"] @@ -867,22 +533,6 @@ unsafe extern "unadjusted" { fn __lasx_xvsigncov_w(a: __v8i32, b: __v8i32) -> __v8i32; #[link_name = "llvm.loongarch.lasx.xvsigncov.d"] fn __lasx_xvsigncov_d(a: __v4i64, b: __v4i64) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvfmadd.s"] - fn __lasx_xvfmadd_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvfmadd.d"] - fn __lasx_xvfmadd_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64; - #[link_name = "llvm.loongarch.lasx.xvfmsub.s"] - fn __lasx_xvfmsub_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvfmsub.d"] - fn __lasx_xvfmsub_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64; - #[link_name = "llvm.loongarch.lasx.xvfnmadd.s"] - fn __lasx_xvfnmadd_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvfnmadd.d"] - fn __lasx_xvfnmadd_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64; - #[link_name = "llvm.loongarch.lasx.xvfnmsub.s"] - fn __lasx_xvfnmsub_s(a: __v8f32, b: __v8f32, c: __v8f32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvfnmsub.d"] - fn __lasx_xvfnmsub_d(a: __v4f64, b: __v4f64, c: __v4f64) -> __v4f64; #[link_name = "llvm.loongarch.lasx.xvftintrne.w.s"] fn __lasx_xvftintrne_w_s(a: __v8f32) -> __v8i32; #[link_name = "llvm.loongarch.lasx.xvftintrne.l.d"] @@ -979,8 +629,6 @@ unsafe extern "unadjusted" { fn __lasx_xvssrln_h_w(a: __v8i32, b: __v8i32) -> 
__v16i16; #[link_name = "llvm.loongarch.lasx.xvssrln.w.d"] fn __lasx_xvssrln_w_d(a: __v4i64, b: __v4i64) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvorn.v"] - fn __lasx_xvorn_v(a: __v32u8, b: __v32u8) -> __v32u8; #[link_name = "llvm.loongarch.lasx.xvldi"] fn __lasx_xvldi(a: i32) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvldx"] @@ -989,10 +637,6 @@ unsafe extern "unadjusted" { fn __lasx_xvstx(a: __v32i8, b: *mut i8, c: i64); #[link_name = "llvm.loongarch.lasx.xvextl.qu.du"] fn __lasx_xvextl_qu_du(a: __v4u64) -> __v4u64; - #[link_name = "llvm.loongarch.lasx.xvinsgr2vr.w"] - fn __lasx_xvinsgr2vr_w(a: __v8i32, b: i32, c: u32) -> __v8i32; - #[link_name = "llvm.loongarch.lasx.xvinsgr2vr.d"] - fn __lasx_xvinsgr2vr_d(a: __v4i64, b: i64, c: u32) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvreplve0.b"] fn __lasx_xvreplve0_b(a: __v32i8) -> __v32i8; #[link_name = "llvm.loongarch.lasx.xvreplve0.h"] @@ -1041,14 +685,6 @@ unsafe extern "unadjusted" { fn __lasx_xvldrepl_w(a: *const i8, b: i32) -> __v8i32; #[link_name = "llvm.loongarch.lasx.xvldrepl.d"] fn __lasx_xvldrepl_d(a: *const i8, b: i32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvpickve2gr.w"] - fn __lasx_xvpickve2gr_w(a: __v8i32, b: u32) -> i32; - #[link_name = "llvm.loongarch.lasx.xvpickve2gr.wu"] - fn __lasx_xvpickve2gr_wu(a: __v8i32, b: u32) -> u32; - #[link_name = "llvm.loongarch.lasx.xvpickve2gr.d"] - fn __lasx_xvpickve2gr_d(a: __v4i64, b: u32) -> i64; - #[link_name = "llvm.loongarch.lasx.xvpickve2gr.du"] - fn __lasx_xvpickve2gr_du(a: __v4i64, b: u32) -> u64; #[link_name = "llvm.loongarch.lasx.xvaddwev.q.d"] fn __lasx_xvaddwev_q_d(a: __v4i64, b: __v4i64) -> __v4i64; #[link_name = "llvm.loongarch.lasx.xvaddwev.d.w"] @@ -1483,14 +1119,6 @@ unsafe extern "unadjusted" { fn __lasx_xvpickve_d_f(a: __v4f64, b: u32) -> __v4f64; #[link_name = "llvm.loongarch.lasx.xvpickve.w.f"] fn __lasx_xvpickve_w_f(a: __v8f32, b: u32) -> __v8f32; - #[link_name = "llvm.loongarch.lasx.xvrepli.b"] - fn 
__lasx_xvrepli_b(a: i32) -> __v32i8; - #[link_name = "llvm.loongarch.lasx.xvrepli.d"] - fn __lasx_xvrepli_d(a: i32) -> __v4i64; - #[link_name = "llvm.loongarch.lasx.xvrepli.h"] - fn __lasx_xvrepli_h(a: i32) -> __v16i16; - #[link_name = "llvm.loongarch.lasx.xvrepli.w"] - fn __lasx_xvrepli_w(a: i32) -> __v8i32; #[link_name = "llvm.loongarch.lasx.cast.128.s"] fn __lasx_cast_128_s(a: __v4f32) -> __v8f32; #[link_name = "llvm.loongarch.lasx.cast.128.d"] @@ -1529,134 +1157,6 @@ unsafe extern "unadjusted" { fn __lasx_insert_128_hi(a: __v4i64, b: __v2i64) -> __v4i64; } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsll_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsll_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsll_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsll_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsll_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsll_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsll_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsll_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslli_b(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lasx_xvslli_b(transmute(a), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn 
lasx_xvslli_h(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lasx_xvslli_h(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslli_w(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvslli_w(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslli_d(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { transmute(__lasx_xvslli_d(transmute(a), IMM6)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsra_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsra_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsra_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsra_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsra_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsra_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsra_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsra_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrai_b(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lasx_xvsrai_b(transmute(a), IMM3)) } -} - 
-#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrai_h(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lasx_xvsrai_h(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrai_w(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvsrai_w(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrai_d(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { transmute(__lasx_xvsrai_d(transmute(a), IMM6)) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1721,70 +1221,6 @@ pub fn lasx_xvsrari_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrari_d(transmute(a), IMM6)) } } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrl_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsrl_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrl_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsrl_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrl_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsrl_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrl_d(a: 
m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsrl_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrli_b(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lasx_xvsrli_b(transmute(a), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrli_h(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lasx_xvsrli_h(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrli_w(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvsrli_w(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsrli_d(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { transmute(__lasx_xvsrli_d(transmute(a), IMM6)) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2043,911 +1479,243 @@ pub fn lasx_xvbitrevi_d(a: m256i) -> m256i { #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvadd_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvadd_b(transmute(a), transmute(b))) } +pub fn lasx_xvsubi_bu(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { transmute(__lasx_xvsubi_bu(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub fn lasx_xvadd_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvadd_h(transmute(a), transmute(b))) } +pub fn lasx_xvsubi_hu(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { transmute(__lasx_xvsubi_hu(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvadd_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvadd_w(transmute(a), transmute(b))) } +pub fn lasx_xvsubi_wu(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { transmute(__lasx_xvsubi_wu(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvadd_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvadd_d(transmute(a), transmute(b))) } +pub fn lasx_xvsubi_du(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { transmute(__lasx_xvsubi_du(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvaddi_bu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvaddi_bu(transmute(a), IMM5)) } +pub fn lasx_xvsat_b(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM3, 3); + unsafe { transmute(__lasx_xvsat_b(transmute(a), IMM3)) } } #[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvaddi_hu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvaddi_hu(transmute(a), IMM5)) } +pub fn lasx_xvsat_h(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM4, 4); + unsafe { transmute(__lasx_xvsat_h(transmute(a), IMM4)) } } 
#[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvaddi_wu(a: m256i) -> m256i { +pub fn lasx_xvsat_w(a: m256i) -> m256i { static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvaddi_wu(transmute(a), IMM5)) } + unsafe { transmute(__lasx_xvsat_w(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvaddi_du(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvaddi_du(transmute(a), IMM5)) } +pub fn lasx_xvsat_d(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM6, 6); + unsafe { transmute(__lasx_xvsat_d(transmute(a), IMM6)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsub_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsub_b(transmute(a), transmute(b))) } +pub fn lasx_xvsat_bu(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM3, 3); + unsafe { transmute(__lasx_xvsat_bu(transmute(a), IMM3)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsub_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsub_h(transmute(a), transmute(b))) } +pub fn lasx_xvsat_hu(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM4, 4); + unsafe { transmute(__lasx_xvsat_hu(transmute(a), IMM4)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsub_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsub_w(transmute(a), transmute(b))) } +pub fn lasx_xvsat_wu(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { 
transmute(__lasx_xvsat_wu(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsub_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsub_d(transmute(a), transmute(b))) } +pub fn lasx_xvsat_du(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM6, 6); + unsafe { transmute(__lasx_xvsat_du(transmute(a), IMM6)) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsubi_bu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvsubi_bu(transmute(a), IMM5)) } +pub fn lasx_xvadda_b(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvadda_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsubi_hu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvsubi_hu(transmute(a), IMM5)) } +pub fn lasx_xvadda_h(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvadda_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsubi_wu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvsubi_wu(transmute(a), IMM5)) } +pub fn lasx_xvadda_w(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvadda_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsubi_du(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvsubi_du(transmute(a), IMM5)) } +pub 
fn lasx_xvadda_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvadda_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmax_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmax_b(transmute(a), transmute(b))) } +pub fn lasx_xvsadd_b(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvsadd_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmax_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmax_h(transmute(a), transmute(b))) } +pub fn lasx_xvsadd_h(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvsadd_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmax_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmax_w(transmute(a), transmute(b))) } +pub fn lasx_xvsadd_w(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvsadd_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmax_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmax_d(transmute(a), transmute(b))) } +pub fn lasx_xvsadd_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvsadd_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmaxi_b(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvmaxi_b(transmute(a), IMM_S5)) } +pub fn lasx_xvsadd_bu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvsadd_bu(transmute(a), transmute(b))) } } #[inline(always)] 
#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmaxi_h(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvmaxi_h(transmute(a), IMM_S5)) } +pub fn lasx_xvsadd_hu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvsadd_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmaxi_w(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvmaxi_w(transmute(a), IMM_S5)) } +pub fn lasx_xvsadd_wu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvsadd_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmaxi_d(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvmaxi_d(transmute(a), IMM_S5)) } +pub fn lasx_xvsadd_du(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvsadd_du(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmax_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmax_bu(transmute(a), transmute(b))) } +pub fn lasx_xvavg_b(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavg_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmax_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmax_hu(transmute(a), transmute(b))) } +pub fn lasx_xvavg_h(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavg_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = 
"lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmax_wu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmax_wu(transmute(a), transmute(b))) } +pub fn lasx_xvavg_w(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavg_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmax_du(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmax_du(transmute(a), transmute(b))) } +pub fn lasx_xvavg_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavg_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmaxi_bu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvmaxi_bu(transmute(a), IMM5)) } +pub fn lasx_xvavg_bu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavg_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmaxi_hu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvmaxi_hu(transmute(a), IMM5)) } +pub fn lasx_xvavg_hu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavg_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmaxi_wu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvmaxi_wu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmaxi_du(a: m256i) -> m256i { - 
static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvmaxi_du(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmin_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmin_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmin_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmin_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmin_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmin_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmin_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmin_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmini_b(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvmini_b(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmini_h(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvmini_h(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmini_w(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvmini_w(transmute(a), IMM_S5)) } -} - -#[inline(always)] 
-#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmini_d(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvmini_d(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmin_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmin_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmin_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmin_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmin_wu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmin_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmin_du(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmin_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmini_bu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvmini_bu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmini_hu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvmini_hu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = 
"117427")] -pub fn lasx_xvmini_wu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvmini_wu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmini_du(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvmini_du(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvseq_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvseq_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvseq_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvseq_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvseq_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvseq_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvseq_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvseq_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvseqi_b(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvseqi_b(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvseqi_h(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { 
transmute(__lasx_xvseqi_h(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvseqi_w(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvseqi_w(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvseqi_d(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvseqi_d(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslt_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvslt_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslt_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvslt_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslt_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvslt_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslt_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvslt_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslti_b(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvslti_b(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] 
-#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslti_h(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvslti_h(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslti_w(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvslti_w(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslti_d(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvslti_d(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslt_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvslt_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslt_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvslt_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslt_wu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvslt_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslt_du(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvslt_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslti_bu(a: 
m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvslti_bu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslti_hu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvslti_hu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslti_wu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvslti_wu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslti_du(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvslti_du(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsle_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsle_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsle_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsle_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsle_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsle_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsle_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsle_d(transmute(a), transmute(b))) } -} - 
-#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslei_b(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvslei_b(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslei_h(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvslei_h(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslei_w(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvslei_w(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslei_d(a: m256i) -> m256i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lasx_xvslei_d(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsle_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsle_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsle_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsle_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsle_wu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsle_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] 
-#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsle_du(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsle_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslei_bu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvslei_bu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslei_hu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvslei_hu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslei_wu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvslei_wu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvslei_du(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvslei_du(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsat_b(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lasx_xvsat_b(transmute(a), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsat_h(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lasx_xvsat_h(transmute(a), IMM4)) } -} - -#[inline(always)] 
-#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsat_w(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvsat_w(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsat_d(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { transmute(__lasx_xvsat_d(transmute(a), IMM6)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsat_bu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lasx_xvsat_bu(transmute(a), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsat_hu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lasx_xvsat_hu(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsat_wu(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lasx_xvsat_wu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsat_du(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { transmute(__lasx_xvsat_du(transmute(a), IMM6)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvadda_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvadda_b(transmute(a), transmute(b))) } -} 
- -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvadda_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvadda_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvadda_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvadda_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvadda_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvadda_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsadd_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsadd_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsadd_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsadd_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsadd_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsadd_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsadd_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsadd_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsadd_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsadd_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = 
"lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsadd_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsadd_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsadd_wu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsadd_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvsadd_du(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvsadd_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavg_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavg_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavg_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavg_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavg_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavg_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavg_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavg_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavg_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavg_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue 
= "117427")] -pub fn lasx_xvavg_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavg_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavg_wu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavg_wu(transmute(a), transmute(b))) } +pub fn lasx_xvavg_wu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavg_wu(transmute(a), transmute(b))) } } #[inline(always)] @@ -2967,302 +1735,162 @@ pub fn lasx_xvavgr_b(a: m256i, b: m256i) -> m256i { #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavgr_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavgr_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavgr_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavgr_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavgr_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavgr_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavgr_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavgr_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavgr_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavgr_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavgr_wu(a: m256i, b: m256i) -> m256i { - unsafe { 
transmute(__lasx_xvavgr_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvavgr_du(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvavgr_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvssub_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvssub_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvssub_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvssub_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvssub_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvssub_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvssub_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvssub_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvssub_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvssub_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvssub_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvssub_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvssub_wu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvssub_wu(transmute(a), 
transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvssub_du(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvssub_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvabsd_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvabsd_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvabsd_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvabsd_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvabsd_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvabsd_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvabsd_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvabsd_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvabsd_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvabsd_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvabsd_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvabsd_hu(transmute(a), transmute(b))) } +pub fn lasx_xvavgr_h(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavgr_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvabsd_wu(a: m256i, b: 
m256i) -> m256i { - unsafe { transmute(__lasx_xvabsd_wu(transmute(a), transmute(b))) } +pub fn lasx_xvavgr_w(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavgr_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvabsd_du(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvabsd_du(transmute(a), transmute(b))) } +pub fn lasx_xvavgr_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavgr_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmul_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmul_b(transmute(a), transmute(b))) } +pub fn lasx_xvavgr_bu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavgr_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmul_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmul_h(transmute(a), transmute(b))) } +pub fn lasx_xvavgr_hu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavgr_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmul_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmul_w(transmute(a), transmute(b))) } +pub fn lasx_xvavgr_wu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavgr_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmul_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmul_d(transmute(a), transmute(b))) } +pub fn lasx_xvavgr_du(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvavgr_du(transmute(a), transmute(b))) } 
} #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmadd_b(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvmadd_b(transmute(a), transmute(b), transmute(c))) } +pub fn lasx_xvssub_b(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvssub_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmadd_h(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvmadd_h(transmute(a), transmute(b), transmute(c))) } +pub fn lasx_xvssub_h(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvssub_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmadd_w(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvmadd_w(transmute(a), transmute(b), transmute(c))) } +pub fn lasx_xvssub_w(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvssub_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmadd_d(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvmadd_d(transmute(a), transmute(b), transmute(c))) } +pub fn lasx_xvssub_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvssub_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmsub_b(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvmsub_b(transmute(a), transmute(b), transmute(c))) } +pub fn lasx_xvssub_bu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvssub_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmsub_h(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvmsub_h(transmute(a), transmute(b), transmute(c))) } +pub fn lasx_xvssub_hu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvssub_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmsub_w(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvmsub_w(transmute(a), transmute(b), transmute(c))) } +pub fn lasx_xvssub_wu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvssub_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmsub_d(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvmsub_d(transmute(a), transmute(b), transmute(c))) } +pub fn lasx_xvssub_du(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvssub_du(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvdiv_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvdiv_b(transmute(a), transmute(b))) } +pub fn lasx_xvabsd_b(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvabsd_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvdiv_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvdiv_h(transmute(a), transmute(b))) } +pub fn lasx_xvabsd_h(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvabsd_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvdiv_w(a: m256i, b: m256i) -> m256i { - unsafe { 
transmute(__lasx_xvdiv_w(transmute(a), transmute(b))) } +pub fn lasx_xvabsd_w(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvabsd_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvdiv_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvdiv_d(transmute(a), transmute(b))) } +pub fn lasx_xvabsd_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvabsd_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvdiv_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvdiv_bu(transmute(a), transmute(b))) } +pub fn lasx_xvabsd_bu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvabsd_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvdiv_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvdiv_hu(transmute(a), transmute(b))) } +pub fn lasx_xvabsd_hu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvabsd_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvdiv_wu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvdiv_wu(transmute(a), transmute(b))) } +pub fn lasx_xvabsd_wu(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvabsd_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvdiv_du(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvdiv_du(transmute(a), transmute(b))) } +pub fn lasx_xvabsd_du(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvabsd_du(transmute(a), transmute(b))) } } #[inline(always)] @@ 
-3349,62 +1977,6 @@ pub fn lasx_xvhsubw_du_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_du_wu(transmute(a), transmute(b))) } } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmod_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmod_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmod_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmod_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmod_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmod_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmod_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmod_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmod_bu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmod_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmod_hu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmod_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmod_wu(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvmod_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvmod_du(a: m256i, b: 
m256i) -> m256i { - unsafe { transmute(__lasx_xvmod_du(transmute(a), transmute(b))) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] @@ -3521,395 +2093,227 @@ pub fn lasx_xvilvh_w(a: m256i, b: m256i) -> m256i { #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvilvh_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvilvh_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvilvl_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvilvl_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvilvl_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvilvl_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvilvl_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvilvl_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvilvl_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvilvl_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpackev_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvpackev_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpackev_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvpackev_h(transmute(a), transmute(b))) } -} - -#[inline(always)] 
-#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpackev_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvpackev_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpackev_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvpackev_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpackod_b(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvpackod_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpackod_h(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvpackod_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpackod_w(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvpackod_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpackod_d(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvpackod_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvshuf_b(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvshuf_b(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvshuf_h(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvshuf_h(transmute(a), transmute(b), transmute(c))) } -} - 
-#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvshuf_w(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvshuf_w(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvshuf_d(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvshuf_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvand_v(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvand_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvandi_b(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lasx_xvandi_b(transmute(a), IMM8)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvor_v(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvor_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvori_b(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lasx_xvori_b(transmute(a), IMM8)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvnor_v(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvnor_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] 
-pub fn lasx_xvnori_b(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lasx_xvnori_b(transmute(a), IMM8)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvxor_v(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvxor_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvxori_b(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lasx_xvxori_b(transmute(a), IMM8)) } +pub fn lasx_xvilvh_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvilvh_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvbitsel_v(a: m256i, b: m256i, c: m256i) -> m256i { - unsafe { transmute(__lasx_xvbitsel_v(transmute(a), transmute(b), transmute(c))) } +pub fn lasx_xvilvl_b(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvilvl_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvbitseli_b(a: m256i, b: m256i) -> m256i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lasx_xvbitseli_b(transmute(a), transmute(b), IMM8)) } +pub fn lasx_xvilvl_h(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvilvl_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvshuf4i_b(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lasx_xvshuf4i_b(transmute(a), IMM8)) } +pub fn lasx_xvilvl_w(a: m256i, b: m256i) -> m256i { + unsafe { 
transmute(__lasx_xvilvl_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvshuf4i_h(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lasx_xvshuf4i_h(transmute(a), IMM8)) } +pub fn lasx_xvilvl_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvilvl_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvshuf4i_w(a: m256i) -> m256i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lasx_xvshuf4i_w(transmute(a), IMM8)) } +pub fn lasx_xvpackev_b(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvpackev_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvreplgr2vr_b(a: i32) -> m256i { - unsafe { transmute(__lasx_xvreplgr2vr_b(transmute(a))) } +pub fn lasx_xvpackev_h(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvpackev_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvreplgr2vr_h(a: i32) -> m256i { - unsafe { transmute(__lasx_xvreplgr2vr_h(transmute(a))) } +pub fn lasx_xvpackev_w(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvpackev_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvreplgr2vr_w(a: i32) -> m256i { - unsafe { transmute(__lasx_xvreplgr2vr_w(transmute(a))) } +pub fn lasx_xvpackev_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvpackev_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvreplgr2vr_d(a: i64) -> m256i { - unsafe { transmute(__lasx_xvreplgr2vr_d(transmute(a))) } +pub fn lasx_xvpackod_b(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvpackod_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpcnt_b(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvpcnt_b(transmute(a))) } +pub fn lasx_xvpackod_h(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvpackod_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpcnt_h(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvpcnt_h(transmute(a))) } +pub fn lasx_xvpackod_w(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvpackod_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpcnt_w(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvpcnt_w(transmute(a))) } +pub fn lasx_xvpackod_d(a: m256i, b: m256i) -> m256i { + unsafe { transmute(__lasx_xvpackod_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpcnt_d(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvpcnt_d(transmute(a))) } +pub fn lasx_xvshuf_b(a: m256i, b: m256i, c: m256i) -> m256i { + unsafe { transmute(__lasx_xvshuf_b(transmute(a), transmute(b), transmute(c))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvclo_b(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvclo_b(transmute(a))) } +pub fn lasx_xvshuf_h(a: m256i, b: m256i, c: m256i) -> m256i { + unsafe { transmute(__lasx_xvshuf_h(transmute(a), 
transmute(b), transmute(c))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvclo_h(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvclo_h(transmute(a))) } +pub fn lasx_xvshuf_w(a: m256i, b: m256i, c: m256i) -> m256i { + unsafe { transmute(__lasx_xvshuf_w(transmute(a), transmute(b), transmute(c))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvclo_w(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvclo_w(transmute(a))) } +pub fn lasx_xvshuf_d(a: m256i, b: m256i, c: m256i) -> m256i { + unsafe { transmute(__lasx_xvshuf_d(transmute(a), transmute(b), transmute(c))) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvclo_d(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvclo_d(transmute(a))) } +pub fn lasx_xvandi_b(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lasx_xvandi_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvclz_b(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvclz_b(transmute(a))) } +pub fn lasx_xvori_b(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lasx_xvori_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvclz_h(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvclz_h(transmute(a))) } +pub fn lasx_xvnori_b(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lasx_xvnori_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvclz_w(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvclz_w(transmute(a))) } +pub fn lasx_xvxori_b(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lasx_xvxori_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvclz_d(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvclz_d(transmute(a))) } +pub fn lasx_xvbitsel_v(a: m256i, b: m256i, c: m256i) -> m256i { + unsafe { transmute(__lasx_xvbitsel_v(transmute(a), transmute(b), transmute(c))) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfadd_s(a: m256, b: m256) -> m256 { - unsafe { transmute(__lasx_xvfadd_s(transmute(a), transmute(b))) } +pub fn lasx_xvbitseli_b(a: m256i, b: m256i) -> m256i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lasx_xvbitseli_b(transmute(a), transmute(b), IMM8)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfadd_d(a: m256d, b: m256d) -> m256d { - unsafe { transmute(__lasx_xvfadd_d(transmute(a), transmute(b))) } +pub fn lasx_xvshuf4i_b(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lasx_xvshuf4i_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lasx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfsub_s(a: m256, b: m256) -> m256 { - unsafe { transmute(__lasx_xvfsub_s(transmute(a), transmute(b))) } +pub fn lasx_xvshuf4i_h(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lasx_xvshuf4i_h(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lasx")] 
+#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfsub_d(a: m256d, b: m256d) -> m256d { - unsafe { transmute(__lasx_xvfsub_d(transmute(a), transmute(b))) } +pub fn lasx_xvshuf4i_w(a: m256i) -> m256i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lasx_xvshuf4i_w(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfmul_s(a: m256, b: m256) -> m256 { - unsafe { transmute(__lasx_xvfmul_s(transmute(a), transmute(b))) } +pub fn lasx_xvclo_b(a: m256i) -> m256i { + unsafe { transmute(__lasx_xvclo_b(transmute(a))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfmul_d(a: m256d, b: m256d) -> m256d { - unsafe { transmute(__lasx_xvfmul_d(transmute(a), transmute(b))) } +pub fn lasx_xvclo_h(a: m256i) -> m256i { + unsafe { transmute(__lasx_xvclo_h(transmute(a))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfdiv_s(a: m256, b: m256) -> m256 { - unsafe { transmute(__lasx_xvfdiv_s(transmute(a), transmute(b))) } +pub fn lasx_xvclo_w(a: m256i) -> m256i { + unsafe { transmute(__lasx_xvclo_w(transmute(a))) } } #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfdiv_d(a: m256d, b: m256d) -> m256d { - unsafe { transmute(__lasx_xvfdiv_d(transmute(a), transmute(b))) } +pub fn lasx_xvclo_d(a: m256i) -> m256i { + unsafe { transmute(__lasx_xvclo_d(transmute(a))) } } #[inline(always)] @@ -3996,20 +2400,6 @@ pub fn lasx_xvfclass_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvfclass_d(transmute(a))) } } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfsqrt_s(a: m256) -> m256 { - unsafe { 
transmute(__lasx_xvfsqrt_s(transmute(a))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfsqrt_d(a: m256d) -> m256d { - unsafe { transmute(__lasx_xvfsqrt_d(transmute(a))) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4243,41 +2633,6 @@ pub fn lasx_xvpermi_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpermi_w(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvandn_v(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvandn_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvneg_b(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvneg_b(transmute(a))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvneg_h(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvneg_h(transmute(a))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvneg_w(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvneg_w(transmute(a))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvneg_d(a: m256i) -> m256i { - unsafe { transmute(__lasx_xvneg_d(transmute(a))) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4749,62 +3104,6 @@ pub fn lasx_xvsigncov_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsigncov_d(transmute(a), transmute(b))) } } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfmadd_s(a: m256, b: m256, c: m256) -> m256 { - unsafe { transmute(__lasx_xvfmadd_s(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfmadd_d(a: m256d, b: m256d, c: m256d) -> m256d { - unsafe { transmute(__lasx_xvfmadd_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfmsub_s(a: m256, b: m256, c: m256) -> m256 { - unsafe { transmute(__lasx_xvfmsub_s(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfmsub_d(a: m256d, b: m256d, c: m256d) -> m256d { - unsafe { transmute(__lasx_xvfmsub_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfnmadd_s(a: m256, b: m256, c: m256) -> m256 { - unsafe { transmute(__lasx_xvfnmadd_s(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfnmadd_d(a: m256d, b: m256d, c: m256d) -> m256d { - unsafe { transmute(__lasx_xvfnmadd_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfnmsub_s(a: m256, b: m256, c: m256) -> m256 { - unsafe { transmute(__lasx_xvfnmsub_s(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvfnmsub_d(a: m256d, b: m256d, c: m256d) -> m256d { - unsafe { 
transmute(__lasx_xvfnmsub_d(transmute(a), transmute(b), transmute(c))) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5165,13 +3464,6 @@ pub fn lasx_xvssrln_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_w_d(transmute(a), transmute(b))) } } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvorn_v(a: m256i, b: m256i) -> m256i { - unsafe { transmute(__lasx_xvorn_v(transmute(a), transmute(b))) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] @@ -5202,24 +3494,6 @@ pub fn lasx_xvextl_qu_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvextl_qu_du(transmute(a))) } } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvinsgr2vr_w(a: m256i, b: i32) -> m256i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lasx_xvinsgr2vr_w(transmute(a), transmute(b), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(2)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvinsgr2vr_d(a: m256i, b: i64) -> m256i { - static_assert_uimm_bits!(IMM2, 2); - unsafe { transmute(__lasx_xvinsgr2vr_d(transmute(a), transmute(b), IMM2)) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5400,42 +3674,6 @@ pub unsafe fn lasx_xvldrepl_d(mem_addr: *const i8) -> m256i { transmute(__lasx_xvldrepl_d(mem_addr, IMM_S9)) } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpickve2gr_w(a: m256i) -> i32 { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lasx_xvpickve2gr_w(transmute(a), IMM3)) } 
-} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpickve2gr_wu(a: m256i) -> u32 { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lasx_xvpickve2gr_wu(transmute(a), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpickve2gr_d(a: m256i) -> i64 { - static_assert_uimm_bits!(IMM2, 2); - unsafe { transmute(__lasx_xvpickve2gr_d(transmute(a), IMM2)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvpickve2gr_du(a: m256i) -> u64 { - static_assert_uimm_bits!(IMM2, 2); - unsafe { transmute(__lasx_xvpickve2gr_du(transmute(a), IMM2)) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -7063,42 +5301,6 @@ pub fn lasx_xvpickve_w_f(a: m256) -> m256 { unsafe { transmute(__lasx_xvpickve_w_f(transmute(a), IMM3)) } } -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(0)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvrepli_b() -> m256i { - static_assert_simm_bits!(IMM_S10, 10); - unsafe { transmute(__lasx_xvrepli_b(IMM_S10)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(0)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvrepli_d() -> m256i { - static_assert_simm_bits!(IMM_S10, 10); - unsafe { transmute(__lasx_xvrepli_d(IMM_S10)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(0)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvrepli_h() -> m256i { - static_assert_simm_bits!(IMM_S10, 10); - unsafe { 
transmute(__lasx_xvrepli_h(IMM_S10)) } -} - -#[inline(always)] -#[target_feature(enable = "lasx")] -#[rustc_legacy_const_generics(0)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lasx_xvrepli_w() -> m256i { - static_assert_simm_bits!(IMM_S10, 10); - unsafe { transmute(__lasx_xvrepli_w(IMM_S10)) } -} - #[inline(always)] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/mod.rs index c3a244e740e9f..cc449e94924f0 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/mod.rs @@ -16,6 +16,13 @@ mod generated; #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub use self::generated::*; +#[rustfmt::skip] +mod portable; + +#[rustfmt::skip] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub use self::portable::*; + #[rustfmt::skip] #[cfg(test)] mod tests; diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/portable.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/portable.rs new file mode 100644 index 0000000000000..0021d7605f9aa --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/portable.rs @@ -0,0 +1,201 @@ +//! 
LoongArch64 LASX intrinsics - intrinsics::simd implementation + +use super::super::{simd::*, *}; +use crate::core_arch::simd::*; +use crate::intrinsics::simd::*; +use crate::mem::transmute; + +impl_vv!("lasx", lasx_xvpcnt_b, simd_ctpop, m256i, i8x32); +impl_vv!("lasx", lasx_xvpcnt_h, simd_ctpop, m256i, i16x16); +impl_vv!("lasx", lasx_xvpcnt_w, simd_ctpop, m256i, i32x8); +impl_vv!("lasx", lasx_xvpcnt_d, simd_ctpop, m256i, i64x4); +impl_vv!("lasx", lasx_xvclz_b, simd_ctlz, m256i, i8x32); +impl_vv!("lasx", lasx_xvclz_h, simd_ctlz, m256i, i16x16); +impl_vv!("lasx", lasx_xvclz_w, simd_ctlz, m256i, i32x8); +impl_vv!("lasx", lasx_xvclz_d, simd_ctlz, m256i, i64x4); +impl_vv!("lasx", lasx_xvneg_b, simd_neg, m256i, i8x32); +impl_vv!("lasx", lasx_xvneg_h, simd_neg, m256i, i16x16); +impl_vv!("lasx", lasx_xvneg_w, simd_neg, m256i, i32x8); +impl_vv!("lasx", lasx_xvneg_d, simd_neg, m256i, i64x4); +impl_vv!("lasx", lasx_xvfsqrt_s, simd_fsqrt, m256, f32x8); +impl_vv!("lasx", lasx_xvfsqrt_d, simd_fsqrt, m256d, f64x4); + +impl_gv!("lasx", lasx_xvreplgr2vr_b, simdl_splat, m256i, i8x32, i32); +impl_gv!("lasx", lasx_xvreplgr2vr_h, simdl_splat, m256i, i16x16, i32); +impl_gv!("lasx", lasx_xvreplgr2vr_w, simdl_splat, m256i, i32x8, i32); +impl_gv!("lasx", lasx_xvreplgr2vr_d, simdl_splat, m256i, i64x4, i64); + +impl_sv!("lasx", lasx_xvrepli_b, simdl_splat, m256i, i8x32, 10); +impl_sv!("lasx", lasx_xvrepli_h, simdl_splat, m256i, i16x16, 10); +impl_sv!("lasx", lasx_xvrepli_w, simdl_splat, m256i, i32x8, 10); +impl_sv!("lasx", lasx_xvrepli_d, simdl_splat, m256i, i64x4, 10); + +impl_vvv!("lasx", lasx_xvadd_b, simd_add, m256i, i8x32); +impl_vvv!("lasx", lasx_xvadd_h, simd_add, m256i, i16x16); +impl_vvv!("lasx", lasx_xvadd_w, simd_add, m256i, i32x8); +impl_vvv!("lasx", lasx_xvadd_d, simd_add, m256i, i64x4); +impl_vvv!("lasx", lasx_xvsub_b, simd_sub, m256i, i8x32); +impl_vvv!("lasx", lasx_xvsub_h, simd_sub, m256i, i16x16); +impl_vvv!("lasx", lasx_xvsub_w, simd_sub, m256i, i32x8); +impl_vvv!("lasx", 
lasx_xvsub_d, simd_sub, m256i, i64x4); +impl_vvv!("lasx", lasx_xvmax_b, simd_imax, m256i, i8x32); +impl_vvv!("lasx", lasx_xvmax_h, simd_imax, m256i, i16x16); +impl_vvv!("lasx", lasx_xvmax_w, simd_imax, m256i, i32x8); +impl_vvv!("lasx", lasx_xvmax_d, simd_imax, m256i, i64x4); +impl_vvv!("lasx", lasx_xvmax_bu, simd_imax, m256i, u8x32); +impl_vvv!("lasx", lasx_xvmax_hu, simd_imax, m256i, u16x16); +impl_vvv!("lasx", lasx_xvmax_wu, simd_imax, m256i, u32x8); +impl_vvv!("lasx", lasx_xvmax_du, simd_imax, m256i, u64x4); +impl_vvv!("lasx", lasx_xvmin_b, simd_imin, m256i, i8x32); +impl_vvv!("lasx", lasx_xvmin_h, simd_imin, m256i, i16x16); +impl_vvv!("lasx", lasx_xvmin_w, simd_imin, m256i, i32x8); +impl_vvv!("lasx", lasx_xvmin_d, simd_imin, m256i, i64x4); +impl_vvv!("lasx", lasx_xvmin_bu, simd_imin, m256i, u8x32); +impl_vvv!("lasx", lasx_xvmin_hu, simd_imin, m256i, u16x16); +impl_vvv!("lasx", lasx_xvmin_wu, simd_imin, m256i, u32x8); +impl_vvv!("lasx", lasx_xvmin_du, simd_imin, m256i, u64x4); +impl_vvv!("lasx", lasx_xvseq_b, simd_eq, m256i, i8x32); +impl_vvv!("lasx", lasx_xvseq_h, simd_eq, m256i, i16x16); +impl_vvv!("lasx", lasx_xvseq_w, simd_eq, m256i, i32x8); +impl_vvv!("lasx", lasx_xvseq_d, simd_eq, m256i, i64x4); +impl_vvv!("lasx", lasx_xvslt_b, simd_lt, m256i, i8x32); +impl_vvv!("lasx", lasx_xvslt_h, simd_lt, m256i, i16x16); +impl_vvv!("lasx", lasx_xvslt_w, simd_lt, m256i, i32x8); +impl_vvv!("lasx", lasx_xvslt_d, simd_lt, m256i, i64x4); +impl_vvv!("lasx", lasx_xvslt_bu, simd_lt, m256i, u8x32); +impl_vvv!("lasx", lasx_xvslt_hu, simd_lt, m256i, u16x16); +impl_vvv!("lasx", lasx_xvslt_wu, simd_lt, m256i, u32x8); +impl_vvv!("lasx", lasx_xvslt_du, simd_lt, m256i, u64x4); +impl_vvv!("lasx", lasx_xvsle_b, simd_le, m256i, i8x32); +impl_vvv!("lasx", lasx_xvsle_h, simd_le, m256i, i16x16); +impl_vvv!("lasx", lasx_xvsle_w, simd_le, m256i, i32x8); +impl_vvv!("lasx", lasx_xvsle_d, simd_le, m256i, i64x4); +impl_vvv!("lasx", lasx_xvsle_bu, simd_le, m256i, u8x32); +impl_vvv!("lasx", 
lasx_xvsle_hu, simd_le, m256i, u16x16); +impl_vvv!("lasx", lasx_xvsle_wu, simd_le, m256i, u32x8); +impl_vvv!("lasx", lasx_xvsle_du, simd_le, m256i, u64x4); +impl_vvv!("lasx", lasx_xvmul_b, simd_mul, m256i, i8x32); +impl_vvv!("lasx", lasx_xvmul_h, simd_mul, m256i, i16x16); +impl_vvv!("lasx", lasx_xvmul_w, simd_mul, m256i, i32x8); +impl_vvv!("lasx", lasx_xvmul_d, simd_mul, m256i, i64x4); +impl_vvv!("lasx", lasx_xvdiv_b, simd_div, m256i, i8x32); +impl_vvv!("lasx", lasx_xvdiv_h, simd_div, m256i, i16x16); +impl_vvv!("lasx", lasx_xvdiv_w, simd_div, m256i, i32x8); +impl_vvv!("lasx", lasx_xvdiv_d, simd_div, m256i, i64x4); +impl_vvv!("lasx", lasx_xvdiv_bu, simd_div, m256i, u8x32); +impl_vvv!("lasx", lasx_xvdiv_hu, simd_div, m256i, u16x16); +impl_vvv!("lasx", lasx_xvdiv_wu, simd_div, m256i, u32x8); +impl_vvv!("lasx", lasx_xvdiv_du, simd_div, m256i, u64x4); +impl_vvv!("lasx", lasx_xvmod_b, simd_rem, m256i, i8x32); +impl_vvv!("lasx", lasx_xvmod_h, simd_rem, m256i, i16x16); +impl_vvv!("lasx", lasx_xvmod_w, simd_rem, m256i, i32x8); +impl_vvv!("lasx", lasx_xvmod_d, simd_rem, m256i, i64x4); +impl_vvv!("lasx", lasx_xvmod_bu, simd_rem, m256i, u8x32); +impl_vvv!("lasx", lasx_xvmod_hu, simd_rem, m256i, u16x16); +impl_vvv!("lasx", lasx_xvmod_wu, simd_rem, m256i, u32x8); +impl_vvv!("lasx", lasx_xvmod_du, simd_rem, m256i, u64x4); +impl_vvv!("lasx", lasx_xvand_v, simd_and, m256i, u8x32); +impl_vvv!("lasx", lasx_xvandn_v, simdl_andn, m256i, u8x32); +impl_vvv!("lasx", lasx_xvor_v, simd_or, m256i, u8x32); +impl_vvv!("lasx", lasx_xvorn_v, simdl_orn, m256i, u8x32); +impl_vvv!("lasx", lasx_xvnor_v, simdl_nor, m256i, u8x32); +impl_vvv!("lasx", lasx_xvxor_v, simd_xor, m256i, u8x32); +impl_vvv!("lasx", lasx_xvfadd_s, simd_add, m256, f32x8); +impl_vvv!("lasx", lasx_xvfadd_d, simd_add, m256d, f64x4); +impl_vvv!("lasx", lasx_xvfsub_s, simd_sub, m256, f32x8); +impl_vvv!("lasx", lasx_xvfsub_d, simd_sub, m256d, f64x4); +impl_vvv!("lasx", lasx_xvfmul_s, simd_mul, m256, f32x8); +impl_vvv!("lasx", 
lasx_xvfmul_d, simd_mul, m256d, f64x4); +impl_vvv!("lasx", lasx_xvfdiv_s, simd_div, m256, f32x8); +impl_vvv!("lasx", lasx_xvfdiv_d, simd_div, m256d, f64x4); +impl_vvv!("lasx", lasx_xvsll_b, simdl_shl, m256i, i8x32); +impl_vvv!("lasx", lasx_xvsll_h, simdl_shl, m256i, i16x16); +impl_vvv!("lasx", lasx_xvsll_w, simdl_shl, m256i, i32x8); +impl_vvv!("lasx", lasx_xvsll_d, simdl_shl, m256i, i64x4); +impl_vvv!("lasx", lasx_xvsra_b, simdl_shr, m256i, i8x32); +impl_vvv!("lasx", lasx_xvsra_h, simdl_shr, m256i, i16x16); +impl_vvv!("lasx", lasx_xvsra_w, simdl_shr, m256i, i32x8); +impl_vvv!("lasx", lasx_xvsra_d, simdl_shr, m256i, i64x4); +impl_vvv!("lasx", lasx_xvsrl_b, simdl_shr, m256i, u8x32); +impl_vvv!("lasx", lasx_xvsrl_h, simdl_shr, m256i, u16x16); +impl_vvv!("lasx", lasx_xvsrl_w, simdl_shr, m256i, u32x8); +impl_vvv!("lasx", lasx_xvsrl_d, simdl_shr, m256i, u64x4); + +impl_vuv!("lasx", lasx_xvslli_b, simd_shl, m256i, i8x32); +impl_vuv!("lasx", lasx_xvslli_h, simd_shl, m256i, i16x16); +impl_vuv!("lasx", lasx_xvslli_w, simd_shl, m256i, i32x8); +impl_vuv!("lasx", lasx_xvslli_d, simd_shl, m256i, i64x4); +impl_vuv!("lasx", lasx_xvsrai_b, simd_shr, m256i, i8x32); +impl_vuv!("lasx", lasx_xvsrai_h, simd_shr, m256i, i16x16); +impl_vuv!("lasx", lasx_xvsrai_w, simd_shr, m256i, i32x8); +impl_vuv!("lasx", lasx_xvsrai_d, simd_shr, m256i, i64x4); +impl_vuv!("lasx", lasx_xvsrli_b, simd_shr, m256i, u8x32); +impl_vuv!("lasx", lasx_xvsrli_h, simd_shr, m256i, u16x16); +impl_vuv!("lasx", lasx_xvsrli_w, simd_shr, m256i, u32x8); +impl_vuv!("lasx", lasx_xvsrli_d, simd_shr, m256i, u64x4); +impl_vuv!("lasx", lasx_xvaddi_bu, simd_add, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvaddi_hu, simd_add, m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvaddi_wu, simd_add, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvaddi_du, simd_add, m256i, u64x4, 5); +impl_vuv!("lasx", lasx_xvslti_bu, simd_lt, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvslti_hu, simd_lt, m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvslti_wu, 
simd_lt, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvslti_du, simd_lt, m256i, u64x4, 5); +impl_vuv!("lasx", lasx_xvslei_bu, simd_le, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvslei_hu, simd_le, m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvslei_wu, simd_le, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvslei_du, simd_le, m256i, u64x4, 5); +impl_vuv!("lasx", lasx_xvmaxi_bu, simd_imax, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvmaxi_hu, simd_imax, m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvmaxi_wu, simd_imax, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvmaxi_du, simd_imax, m256i, u64x4, 5); +impl_vuv!("lasx", lasx_xvmini_bu, simd_imin, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvmini_hu, simd_imin, m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvmini_wu, simd_imin, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvmini_du, simd_imin, m256i, u64x4, 5); + +impl_vug!("lasx", lasx_xvpickve2gr_w, simd_extract, m256i, i32x8, i32, 3); +impl_vug!("lasx", lasx_xvpickve2gr_d, simd_extract, m256i, i64x4, i64, 2); +impl_vug!("lasx", lasx_xvpickve2gr_wu, simd_extract, m256i, u32x8, u32, 3); +impl_vug!("lasx", lasx_xvpickve2gr_du, simd_extract, m256i, u64x4, u64, 2); + +impl_vsv!("lasx", lasx_xvseqi_b, simd_eq, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvseqi_h, simd_eq, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvseqi_w, simd_eq, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvseqi_d, simd_eq, m256i, i64x4, 5); +impl_vsv!("lasx", lasx_xvslti_b, simd_lt, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvslti_h, simd_lt, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvslti_w, simd_lt, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvslti_d, simd_lt, m256i, i64x4, 5); +impl_vsv!("lasx", lasx_xvslei_b, simd_le, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvslei_h, simd_le, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvslei_w, simd_le, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvslei_d, simd_le, m256i, i64x4, 5); +impl_vsv!("lasx", lasx_xvmaxi_b, simd_imax, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvmaxi_h, 
simd_imax, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvmaxi_w, simd_imax, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvmaxi_d, simd_imax, m256i, i64x4, 5); +impl_vsv!("lasx", lasx_xvmini_b, simd_imin, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvmini_h, simd_imin, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvmini_w, simd_imin, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvmini_d, simd_imin, m256i, i64x4, 5); + +impl_vvvv!("lasx", lasx_xvmadd_b, simdl_madd, m256i, i8x32); +impl_vvvv!("lasx", lasx_xvmadd_h, simdl_madd, m256i, i16x16); +impl_vvvv!("lasx", lasx_xvmadd_w, simdl_madd, m256i, i32x8); +impl_vvvv!("lasx", lasx_xvmadd_d, simdl_madd, m256i, i64x4); +impl_vvvv!("lasx", lasx_xvmsub_b, simdl_msub, m256i, i8x32); +impl_vvvv!("lasx", lasx_xvmsub_h, simdl_msub, m256i, i16x16); +impl_vvvv!("lasx", lasx_xvmsub_w, simdl_msub, m256i, i32x8); +impl_vvvv!("lasx", lasx_xvmsub_d, simdl_msub, m256i, i64x4); +impl_vvvv!("lasx", lasx_xvfmadd_s, simd_fma, m256, f32x8); +impl_vvvv!("lasx", lasx_xvfmadd_d, simd_fma, m256d, f64x4); +impl_vvvv!("lasx", lasx_xvfmsub_s, simdl_fms, m256, f32x8); +impl_vvvv!("lasx", lasx_xvfmsub_d, simdl_fms, m256d, f64x4); +impl_vvvv!("lasx", lasx_xvfnmadd_s, simdl_nfma, m256, f32x8); +impl_vvvv!("lasx", lasx_xvfnmadd_d, simdl_nfma, m256d, f64x4); +impl_vvvv!("lasx", lasx_xvfnmsub_s, simdl_nfms, m256, f32x8); +impl_vvvv!("lasx", lasx_xvfnmsub_d, simdl_nfms, m256d, f64x4); + +impl_vugv!("lasx", lasx_xvinsgr2vr_w, simd_insert, m256i, i32x8, i32, 3); +impl_vugv!("lasx", lasx_xvinsgr2vr_d, simd_insert, m256i, i64x4, i64, 2); diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs index faa8859eba777..d2d77e2f3e932 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs @@ -11,38 +11,6 @@ use super::super::*; #[allow(improper_ctypes)] unsafe extern "unadjusted" { - 
#[link_name = "llvm.loongarch.lsx.vsll.b"] - fn __lsx_vsll_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vsll.h"] - fn __lsx_vsll_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vsll.w"] - fn __lsx_vsll_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vsll.d"] - fn __lsx_vsll_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vslli.b"] - fn __lsx_vslli_b(a: __v16i8, b: u32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vslli.h"] - fn __lsx_vslli_h(a: __v8i16, b: u32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vslli.w"] - fn __lsx_vslli_w(a: __v4i32, b: u32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vslli.d"] - fn __lsx_vslli_d(a: __v2i64, b: u32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vsra.b"] - fn __lsx_vsra_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vsra.h"] - fn __lsx_vsra_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vsra.w"] - fn __lsx_vsra_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vsra.d"] - fn __lsx_vsra_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vsrai.b"] - fn __lsx_vsrai_b(a: __v16i8, b: u32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vsrai.h"] - fn __lsx_vsrai_h(a: __v8i16, b: u32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vsrai.w"] - fn __lsx_vsrai_w(a: __v4i32, b: u32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vsrai.d"] - fn __lsx_vsrai_d(a: __v2i64, b: u32) -> __v2i64; #[link_name = "llvm.loongarch.lsx.vsrar.b"] fn __lsx_vsrar_b(a: __v16i8, b: __v16i8) -> __v16i8; #[link_name = "llvm.loongarch.lsx.vsrar.h"] @@ -59,22 +27,6 @@ unsafe extern "unadjusted" { fn __lsx_vsrari_w(a: __v4i32, b: u32) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vsrari.d"] fn __lsx_vsrari_d(a: __v2i64, b: u32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vsrl.b"] - fn __lsx_vsrl_b(a: __v16i8, b: __v16i8) -> __v16i8; 
- #[link_name = "llvm.loongarch.lsx.vsrl.h"] - fn __lsx_vsrl_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vsrl.w"] - fn __lsx_vsrl_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vsrl.d"] - fn __lsx_vsrl_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vsrli.b"] - fn __lsx_vsrli_b(a: __v16i8, b: u32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vsrli.h"] - fn __lsx_vsrli_h(a: __v8i16, b: u32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vsrli.w"] - fn __lsx_vsrli_w(a: __v4i32, b: u32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vsrli.d"] - fn __lsx_vsrli_d(a: __v2i64, b: u32) -> __v2i64; #[link_name = "llvm.loongarch.lsx.vsrlr.b"] fn __lsx_vsrlr_b(a: __v16i8, b: __v16i8) -> __v16i8; #[link_name = "llvm.loongarch.lsx.vsrlr.h"] @@ -139,30 +91,6 @@ unsafe extern "unadjusted" { fn __lsx_vbitrevi_w(a: __v4u32, b: u32) -> __v4u32; #[link_name = "llvm.loongarch.lsx.vbitrevi.d"] fn __lsx_vbitrevi_d(a: __v2u64, b: u32) -> __v2u64; - #[link_name = "llvm.loongarch.lsx.vadd.b"] - fn __lsx_vadd_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vadd.h"] - fn __lsx_vadd_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vadd.w"] - fn __lsx_vadd_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vadd.d"] - fn __lsx_vadd_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vaddi.bu"] - fn __lsx_vaddi_bu(a: __v16i8, b: u32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vaddi.hu"] - fn __lsx_vaddi_hu(a: __v8i16, b: u32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vaddi.wu"] - fn __lsx_vaddi_wu(a: __v4i32, b: u32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vaddi.du"] - fn __lsx_vaddi_du(a: __v2i64, b: u32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vsub.b"] - fn __lsx_vsub_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vsub.h"] - fn __lsx_vsub_h(a: __v8i16, b: 
__v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vsub.w"] - fn __lsx_vsub_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vsub.d"] - fn __lsx_vsub_d(a: __v2i64, b: __v2i64) -> __v2i64; #[link_name = "llvm.loongarch.lsx.vsubi.bu"] fn __lsx_vsubi_bu(a: __v16i8, b: u32) -> __v16i8; #[link_name = "llvm.loongarch.lsx.vsubi.hu"] @@ -171,150 +99,6 @@ unsafe extern "unadjusted" { fn __lsx_vsubi_wu(a: __v4i32, b: u32) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vsubi.du"] fn __lsx_vsubi_du(a: __v2i64, b: u32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vmax.b"] - fn __lsx_vmax_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vmax.h"] - fn __lsx_vmax_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vmax.w"] - fn __lsx_vmax_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vmax.d"] - fn __lsx_vmax_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vmaxi.b"] - fn __lsx_vmaxi_b(a: __v16i8, b: i32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vmaxi.h"] - fn __lsx_vmaxi_h(a: __v8i16, b: i32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vmaxi.w"] - fn __lsx_vmaxi_w(a: __v4i32, b: i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vmaxi.d"] - fn __lsx_vmaxi_d(a: __v2i64, b: i32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vmax.bu"] - fn __lsx_vmax_bu(a: __v16u8, b: __v16u8) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vmax.hu"] - fn __lsx_vmax_hu(a: __v8u16, b: __v8u16) -> __v8u16; - #[link_name = "llvm.loongarch.lsx.vmax.wu"] - fn __lsx_vmax_wu(a: __v4u32, b: __v4u32) -> __v4u32; - #[link_name = "llvm.loongarch.lsx.vmax.du"] - fn __lsx_vmax_du(a: __v2u64, b: __v2u64) -> __v2u64; - #[link_name = "llvm.loongarch.lsx.vmaxi.bu"] - fn __lsx_vmaxi_bu(a: __v16u8, b: u32) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vmaxi.hu"] - fn __lsx_vmaxi_hu(a: __v8u16, b: u32) -> __v8u16; - #[link_name = "llvm.loongarch.lsx.vmaxi.wu"] - fn 
__lsx_vmaxi_wu(a: __v4u32, b: u32) -> __v4u32; - #[link_name = "llvm.loongarch.lsx.vmaxi.du"] - fn __lsx_vmaxi_du(a: __v2u64, b: u32) -> __v2u64; - #[link_name = "llvm.loongarch.lsx.vmin.b"] - fn __lsx_vmin_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vmin.h"] - fn __lsx_vmin_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vmin.w"] - fn __lsx_vmin_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vmin.d"] - fn __lsx_vmin_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vmini.b"] - fn __lsx_vmini_b(a: __v16i8, b: i32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vmini.h"] - fn __lsx_vmini_h(a: __v8i16, b: i32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vmini.w"] - fn __lsx_vmini_w(a: __v4i32, b: i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vmini.d"] - fn __lsx_vmini_d(a: __v2i64, b: i32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vmin.bu"] - fn __lsx_vmin_bu(a: __v16u8, b: __v16u8) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vmin.hu"] - fn __lsx_vmin_hu(a: __v8u16, b: __v8u16) -> __v8u16; - #[link_name = "llvm.loongarch.lsx.vmin.wu"] - fn __lsx_vmin_wu(a: __v4u32, b: __v4u32) -> __v4u32; - #[link_name = "llvm.loongarch.lsx.vmin.du"] - fn __lsx_vmin_du(a: __v2u64, b: __v2u64) -> __v2u64; - #[link_name = "llvm.loongarch.lsx.vmini.bu"] - fn __lsx_vmini_bu(a: __v16u8, b: u32) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vmini.hu"] - fn __lsx_vmini_hu(a: __v8u16, b: u32) -> __v8u16; - #[link_name = "llvm.loongarch.lsx.vmini.wu"] - fn __lsx_vmini_wu(a: __v4u32, b: u32) -> __v4u32; - #[link_name = "llvm.loongarch.lsx.vmini.du"] - fn __lsx_vmini_du(a: __v2u64, b: u32) -> __v2u64; - #[link_name = "llvm.loongarch.lsx.vseq.b"] - fn __lsx_vseq_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vseq.h"] - fn __lsx_vseq_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vseq.w"] - fn __lsx_vseq_w(a: 
__v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vseq.d"] - fn __lsx_vseq_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vseqi.b"] - fn __lsx_vseqi_b(a: __v16i8, b: i32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vseqi.h"] - fn __lsx_vseqi_h(a: __v8i16, b: i32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vseqi.w"] - fn __lsx_vseqi_w(a: __v4i32, b: i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vseqi.d"] - fn __lsx_vseqi_d(a: __v2i64, b: i32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vslti.b"] - fn __lsx_vslti_b(a: __v16i8, b: i32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vslt.b"] - fn __lsx_vslt_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vslt.h"] - fn __lsx_vslt_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vslt.w"] - fn __lsx_vslt_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vslt.d"] - fn __lsx_vslt_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vslti.h"] - fn __lsx_vslti_h(a: __v8i16, b: i32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vslti.w"] - fn __lsx_vslti_w(a: __v4i32, b: i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vslti.d"] - fn __lsx_vslti_d(a: __v2i64, b: i32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vslt.bu"] - fn __lsx_vslt_bu(a: __v16u8, b: __v16u8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vslt.hu"] - fn __lsx_vslt_hu(a: __v8u16, b: __v8u16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vslt.wu"] - fn __lsx_vslt_wu(a: __v4u32, b: __v4u32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vslt.du"] - fn __lsx_vslt_du(a: __v2u64, b: __v2u64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vslti.bu"] - fn __lsx_vslti_bu(a: __v16u8, b: u32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vslti.hu"] - fn __lsx_vslti_hu(a: __v8u16, b: u32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vslti.wu"] - fn __lsx_vslti_wu(a: __v4u32, b: u32) -> 
__v4i32; - #[link_name = "llvm.loongarch.lsx.vslti.du"] - fn __lsx_vslti_du(a: __v2u64, b: u32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vsle.b"] - fn __lsx_vsle_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vsle.h"] - fn __lsx_vsle_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vsle.w"] - fn __lsx_vsle_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vsle.d"] - fn __lsx_vsle_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vslei.b"] - fn __lsx_vslei_b(a: __v16i8, b: i32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vslei.h"] - fn __lsx_vslei_h(a: __v8i16, b: i32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vslei.w"] - fn __lsx_vslei_w(a: __v4i32, b: i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vslei.d"] - fn __lsx_vslei_d(a: __v2i64, b: i32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vsle.bu"] - fn __lsx_vsle_bu(a: __v16u8, b: __v16u8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vsle.hu"] - fn __lsx_vsle_hu(a: __v8u16, b: __v8u16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vsle.wu"] - fn __lsx_vsle_wu(a: __v4u32, b: __v4u32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vsle.du"] - fn __lsx_vsle_du(a: __v2u64, b: __v2u64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vslei.bu"] - fn __lsx_vslei_bu(a: __v16u8, b: u32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vslei.hu"] - fn __lsx_vslei_hu(a: __v8u16, b: u32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vslei.wu"] - fn __lsx_vslei_wu(a: __v4u32, b: u32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vslei.du"] - fn __lsx_vslei_du(a: __v2u64, b: u32) -> __v2i64; #[link_name = "llvm.loongarch.lsx.vsat.b"] fn __lsx_vsat_b(a: __v16i8, b: u32) -> __v16i8; #[link_name = "llvm.loongarch.lsx.vsat.h"] @@ -419,46 +203,6 @@ unsafe extern "unadjusted" { fn __lsx_vabsd_wu(a: __v4u32, b: __v4u32) -> __v4u32; #[link_name = "llvm.loongarch.lsx.vabsd.du"] fn __lsx_vabsd_du(a: 
__v2u64, b: __v2u64) -> __v2u64; - #[link_name = "llvm.loongarch.lsx.vmul.b"] - fn __lsx_vmul_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vmul.h"] - fn __lsx_vmul_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vmul.w"] - fn __lsx_vmul_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vmul.d"] - fn __lsx_vmul_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vmadd.b"] - fn __lsx_vmadd_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vmadd.h"] - fn __lsx_vmadd_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vmadd.w"] - fn __lsx_vmadd_w(a: __v4i32, b: __v4i32, c: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vmadd.d"] - fn __lsx_vmadd_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vmsub.b"] - fn __lsx_vmsub_b(a: __v16i8, b: __v16i8, c: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vmsub.h"] - fn __lsx_vmsub_h(a: __v8i16, b: __v8i16, c: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vmsub.w"] - fn __lsx_vmsub_w(a: __v4i32, b: __v4i32, c: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vmsub.d"] - fn __lsx_vmsub_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vdiv.b"] - fn __lsx_vdiv_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vdiv.h"] - fn __lsx_vdiv_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vdiv.w"] - fn __lsx_vdiv_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vdiv.d"] - fn __lsx_vdiv_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vdiv.bu"] - fn __lsx_vdiv_bu(a: __v16u8, b: __v16u8) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vdiv.hu"] - fn __lsx_vdiv_hu(a: __v8u16, b: __v8u16) -> __v8u16; - #[link_name = "llvm.loongarch.lsx.vdiv.wu"] - fn __lsx_vdiv_wu(a: 
__v4u32, b: __v4u32) -> __v4u32; - #[link_name = "llvm.loongarch.lsx.vdiv.du"] - fn __lsx_vdiv_du(a: __v2u64, b: __v2u64) -> __v2u64; #[link_name = "llvm.loongarch.lsx.vhaddw.h.b"] fn __lsx_vhaddw_h_b(a: __v16i8, b: __v16i8) -> __v8i16; #[link_name = "llvm.loongarch.lsx.vhaddw.w.h"] @@ -483,22 +227,6 @@ unsafe extern "unadjusted" { fn __lsx_vhsubw_wu_hu(a: __v8u16, b: __v8u16) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vhsubw.du.wu"] fn __lsx_vhsubw_du_wu(a: __v4u32, b: __v4u32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vmod.b"] - fn __lsx_vmod_b(a: __v16i8, b: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vmod.h"] - fn __lsx_vmod_h(a: __v8i16, b: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vmod.w"] - fn __lsx_vmod_w(a: __v4i32, b: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vmod.d"] - fn __lsx_vmod_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vmod.bu"] - fn __lsx_vmod_bu(a: __v16u8, b: __v16u8) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vmod.hu"] - fn __lsx_vmod_hu(a: __v8u16, b: __v8u16) -> __v8u16; - #[link_name = "llvm.loongarch.lsx.vmod.wu"] - fn __lsx_vmod_wu(a: __v4u32, b: __v4u32) -> __v4u32; - #[link_name = "llvm.loongarch.lsx.vmod.du"] - fn __lsx_vmod_du(a: __v2u64, b: __v2u64) -> __v2u64; #[link_name = "llvm.loongarch.lsx.vreplve.b"] fn __lsx_vreplve_b(a: __v16i8, b: i32) -> __v16i8; #[link_name = "llvm.loongarch.lsx.vreplve.h"] @@ -569,20 +297,12 @@ unsafe extern "unadjusted" { fn __lsx_vshuf_w(a: __v4i32, b: __v4i32, c: __v4i32) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vshuf.d"] fn __lsx_vshuf_d(a: __v2i64, b: __v2i64, c: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vand.v"] - fn __lsx_vand_v(a: __v16u8, b: __v16u8) -> __v16u8; #[link_name = "llvm.loongarch.lsx.vandi.b"] fn __lsx_vandi_b(a: __v16u8, b: u32) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vor.v"] - fn __lsx_vor_v(a: __v16u8, b: __v16u8) -> __v16u8; #[link_name = "llvm.loongarch.lsx.vori.b"] 
fn __lsx_vori_b(a: __v16u8, b: u32) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vnor.v"] - fn __lsx_vnor_v(a: __v16u8, b: __v16u8) -> __v16u8; #[link_name = "llvm.loongarch.lsx.vnori.b"] fn __lsx_vnori_b(a: __v16u8, b: u32) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vxor.v"] - fn __lsx_vxor_v(a: __v16u8, b: __v16u8) -> __v16u8; #[link_name = "llvm.loongarch.lsx.vxori.b"] fn __lsx_vxori_b(a: __v16u8, b: u32) -> __v16u8; #[link_name = "llvm.loongarch.lsx.vbitsel.v"] @@ -595,22 +315,6 @@ unsafe extern "unadjusted" { fn __lsx_vshuf4i_h(a: __v8i16, b: u32) -> __v8i16; #[link_name = "llvm.loongarch.lsx.vshuf4i.w"] fn __lsx_vshuf4i_w(a: __v4i32, b: u32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vreplgr2vr.b"] - fn __lsx_vreplgr2vr_b(a: i32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vreplgr2vr.h"] - fn __lsx_vreplgr2vr_h(a: i32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vreplgr2vr.w"] - fn __lsx_vreplgr2vr_w(a: i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vreplgr2vr.d"] - fn __lsx_vreplgr2vr_d(a: i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vpcnt.b"] - fn __lsx_vpcnt_b(a: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vpcnt.h"] - fn __lsx_vpcnt_h(a: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vpcnt.w"] - fn __lsx_vpcnt_w(a: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vpcnt.d"] - fn __lsx_vpcnt_d(a: __v2i64) -> __v2i64; #[link_name = "llvm.loongarch.lsx.vclo.b"] fn __lsx_vclo_b(a: __v16i8) -> __v16i8; #[link_name = "llvm.loongarch.lsx.vclo.h"] @@ -619,54 +323,6 @@ unsafe extern "unadjusted" { fn __lsx_vclo_w(a: __v4i32) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vclo.d"] fn __lsx_vclo_d(a: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vclz.b"] - fn __lsx_vclz_b(a: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vclz.h"] - fn __lsx_vclz_h(a: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vclz.w"] - fn __lsx_vclz_w(a: __v4i32) -> __v4i32; - #[link_name = 
"llvm.loongarch.lsx.vclz.d"] - fn __lsx_vclz_d(a: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vpickve2gr.b"] - fn __lsx_vpickve2gr_b(a: __v16i8, b: u32) -> i32; - #[link_name = "llvm.loongarch.lsx.vpickve2gr.h"] - fn __lsx_vpickve2gr_h(a: __v8i16, b: u32) -> i32; - #[link_name = "llvm.loongarch.lsx.vpickve2gr.w"] - fn __lsx_vpickve2gr_w(a: __v4i32, b: u32) -> i32; - #[link_name = "llvm.loongarch.lsx.vpickve2gr.d"] - fn __lsx_vpickve2gr_d(a: __v2i64, b: u32) -> i64; - #[link_name = "llvm.loongarch.lsx.vpickve2gr.bu"] - fn __lsx_vpickve2gr_bu(a: __v16i8, b: u32) -> u32; - #[link_name = "llvm.loongarch.lsx.vpickve2gr.hu"] - fn __lsx_vpickve2gr_hu(a: __v8i16, b: u32) -> u32; - #[link_name = "llvm.loongarch.lsx.vpickve2gr.wu"] - fn __lsx_vpickve2gr_wu(a: __v4i32, b: u32) -> u32; - #[link_name = "llvm.loongarch.lsx.vpickve2gr.du"] - fn __lsx_vpickve2gr_du(a: __v2i64, b: u32) -> u64; - #[link_name = "llvm.loongarch.lsx.vinsgr2vr.b"] - fn __lsx_vinsgr2vr_b(a: __v16i8, b: i32, c: u32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vinsgr2vr.h"] - fn __lsx_vinsgr2vr_h(a: __v8i16, b: i32, c: u32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vinsgr2vr.w"] - fn __lsx_vinsgr2vr_w(a: __v4i32, b: i32, c: u32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vinsgr2vr.d"] - fn __lsx_vinsgr2vr_d(a: __v2i64, b: i64, c: u32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vfadd.s"] - fn __lsx_vfadd_s(a: __v4f32, b: __v4f32) -> __v4f32; - #[link_name = "llvm.loongarch.lsx.vfadd.d"] - fn __lsx_vfadd_d(a: __v2f64, b: __v2f64) -> __v2f64; - #[link_name = "llvm.loongarch.lsx.vfsub.s"] - fn __lsx_vfsub_s(a: __v4f32, b: __v4f32) -> __v4f32; - #[link_name = "llvm.loongarch.lsx.vfsub.d"] - fn __lsx_vfsub_d(a: __v2f64, b: __v2f64) -> __v2f64; - #[link_name = "llvm.loongarch.lsx.vfmul.s"] - fn __lsx_vfmul_s(a: __v4f32, b: __v4f32) -> __v4f32; - #[link_name = "llvm.loongarch.lsx.vfmul.d"] - fn __lsx_vfmul_d(a: __v2f64, b: __v2f64) -> __v2f64; - #[link_name = 
"llvm.loongarch.lsx.vfdiv.s"] - fn __lsx_vfdiv_s(a: __v4f32, b: __v4f32) -> __v4f32; - #[link_name = "llvm.loongarch.lsx.vfdiv.d"] - fn __lsx_vfdiv_d(a: __v2f64, b: __v2f64) -> __v2f64; #[link_name = "llvm.loongarch.lsx.vfcvt.h.s"] fn __lsx_vfcvt_h_s(a: __v4f32, b: __v4f32) -> __v8i16; #[link_name = "llvm.loongarch.lsx.vfcvt.s.d"] @@ -691,10 +347,6 @@ unsafe extern "unadjusted" { fn __lsx_vfclass_s(a: __v4f32) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vfclass.d"] fn __lsx_vfclass_d(a: __v2f64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vfsqrt.s"] - fn __lsx_vfsqrt_s(a: __v4f32) -> __v4f32; - #[link_name = "llvm.loongarch.lsx.vfsqrt.d"] - fn __lsx_vfsqrt_d(a: __v2f64) -> __v2f64; #[link_name = "llvm.loongarch.lsx.vfrecip.s"] fn __lsx_vfrecip_s(a: __v4f32) -> __v4f32; #[link_name = "llvm.loongarch.lsx.vfrecip.d"] @@ -751,16 +403,6 @@ unsafe extern "unadjusted" { fn __lsx_vffint_s_wu(a: __v4u32) -> __v4f32; #[link_name = "llvm.loongarch.lsx.vffint.d.lu"] fn __lsx_vffint_d_lu(a: __v2u64) -> __v2f64; - #[link_name = "llvm.loongarch.lsx.vandn.v"] - fn __lsx_vandn_v(a: __v16u8, b: __v16u8) -> __v16u8; - #[link_name = "llvm.loongarch.lsx.vneg.b"] - fn __lsx_vneg_b(a: __v16i8) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vneg.h"] - fn __lsx_vneg_h(a: __v8i16) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vneg.w"] - fn __lsx_vneg_w(a: __v4i32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vneg.d"] - fn __lsx_vneg_d(a: __v2i64) -> __v2i64; #[link_name = "llvm.loongarch.lsx.vmuh.b"] fn __lsx_vmuh_b(a: __v16i8, b: __v16i8) -> __v16i8; #[link_name = "llvm.loongarch.lsx.vmuh.h"] @@ -887,22 +529,6 @@ unsafe extern "unadjusted" { fn __lsx_vsigncov_w(a: __v4i32, b: __v4i32) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vsigncov.d"] fn __lsx_vsigncov_d(a: __v2i64, b: __v2i64) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vfmadd.s"] - fn __lsx_vfmadd_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32; - #[link_name = "llvm.loongarch.lsx.vfmadd.d"] - fn 
__lsx_vfmadd_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64; - #[link_name = "llvm.loongarch.lsx.vfmsub.s"] - fn __lsx_vfmsub_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32; - #[link_name = "llvm.loongarch.lsx.vfmsub.d"] - fn __lsx_vfmsub_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64; - #[link_name = "llvm.loongarch.lsx.vfnmadd.s"] - fn __lsx_vfnmadd_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32; - #[link_name = "llvm.loongarch.lsx.vfnmadd.d"] - fn __lsx_vfnmadd_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64; - #[link_name = "llvm.loongarch.lsx.vfnmsub.s"] - fn __lsx_vfnmsub_s(a: __v4f32, b: __v4f32, c: __v4f32) -> __v4f32; - #[link_name = "llvm.loongarch.lsx.vfnmsub.d"] - fn __lsx_vfnmsub_d(a: __v2f64, b: __v2f64, c: __v2f64) -> __v2f64; #[link_name = "llvm.loongarch.lsx.vftintrne.w.s"] fn __lsx_vftintrne_w_s(a: __v4f32) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vftintrne.l.d"] @@ -1323,8 +949,6 @@ unsafe extern "unadjusted" { fn __lsx_vssrln_h_w(a: __v4i32, b: __v4i32) -> __v8i16; #[link_name = "llvm.loongarch.lsx.vssrln.w.d"] fn __lsx_vssrln_w_d(a: __v2i64, b: __v2i64) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vorn.v"] - fn __lsx_vorn_v(a: __v16u8, b: __v16u8) -> __v16u8; #[link_name = "llvm.loongarch.lsx.vldi"] fn __lsx_vldi(a: i32) -> __v2i64; #[link_name = "llvm.loongarch.lsx.vshuf.b"] @@ -1443,142 +1067,6 @@ unsafe extern "unadjusted" { fn __lsx_vfcmp_sune_s(a: __v4f32, b: __v4f32) -> __v4i32; #[link_name = "llvm.loongarch.lsx.vfcmp.sun.s"] fn __lsx_vfcmp_sun_s(a: __v4f32, b: __v4f32) -> __v4i32; - #[link_name = "llvm.loongarch.lsx.vrepli.b"] - fn __lsx_vrepli_b(a: i32) -> __v16i8; - #[link_name = "llvm.loongarch.lsx.vrepli.d"] - fn __lsx_vrepli_d(a: i32) -> __v2i64; - #[link_name = "llvm.loongarch.lsx.vrepli.h"] - fn __lsx_vrepli_h(a: i32) -> __v8i16; - #[link_name = "llvm.loongarch.lsx.vrepli.w"] - fn __lsx_vrepli_w(a: i32) -> __v4i32; -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub fn lsx_vsll_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsll_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsll_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsll_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsll_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsll_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsll_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsll_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslli_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lsx_vslli_b(transmute(a), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslli_h(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lsx_vslli_h(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslli_w(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vslli_w(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslli_d(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { 
transmute(__lsx_vslli_d(transmute(a), IMM6)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsra_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsra_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsra_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsra_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsra_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsra_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsra_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsra_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrai_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lsx_vsrai_b(transmute(a), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrai_h(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lsx_vsrai_h(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrai_w(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vsrai_w(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrai_d(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { transmute(__lsx_vsrai_d(transmute(a), IMM6)) } } #[inline(always)] @@ -1645,70 +1133,6 @@ pub fn lsx_vsrari_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrari_d(transmute(a), IMM6)) } } -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrl_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsrl_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrl_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsrl_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrl_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsrl_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrl_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsrl_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrli_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lsx_vsrli_b(transmute(a), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrli_h(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lsx_vsrli_h(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] 
-pub fn lsx_vsrli_w(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vsrli_w(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsrli_d(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { transmute(__lsx_vsrli_d(transmute(a), IMM6)) } -} - #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1967,1412 +1391,548 @@ pub fn lsx_vbitrevi_d(a: m128i) -> m128i { #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vadd_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vadd_b(transmute(a), transmute(b))) } +pub fn lsx_vsubi_bu(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { transmute(__lsx_vsubi_bu(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vadd_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vadd_h(transmute(a), transmute(b))) } +pub fn lsx_vsubi_hu(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { transmute(__lsx_vsubi_hu(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vadd_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vadd_w(transmute(a), transmute(b))) } +pub fn lsx_vsubi_wu(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { transmute(__lsx_vsubi_wu(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vadd_d(a: 
m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vadd_d(transmute(a), transmute(b))) } +pub fn lsx_vsubi_du(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { transmute(__lsx_vsubi_du(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vaddi_bu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vaddi_bu(transmute(a), IMM5)) } +pub fn lsx_vsat_b(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM3, 3); + unsafe { transmute(__lsx_vsat_b(transmute(a), IMM3)) } } #[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vaddi_hu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vaddi_hu(transmute(a), IMM5)) } +pub fn lsx_vsat_h(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM4, 4); + unsafe { transmute(__lsx_vsat_h(transmute(a), IMM4)) } } #[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vaddi_wu(a: m128i) -> m128i { +pub fn lsx_vsat_w(a: m128i) -> m128i { static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vaddi_wu(transmute(a), IMM5)) } + unsafe { transmute(__lsx_vsat_w(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vaddi_du(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vaddi_du(transmute(a), IMM5)) } +pub fn lsx_vsat_d(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM6, 6); + unsafe { transmute(__lsx_vsat_d(transmute(a), IMM6)) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] -pub fn lsx_vsub_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsub_b(transmute(a), transmute(b))) } +pub fn lsx_vsat_bu(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM3, 3); + unsafe { transmute(__lsx_vsat_bu(transmute(a), IMM3)) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsub_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsub_h(transmute(a), transmute(b))) } +pub fn lsx_vsat_hu(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM4, 4); + unsafe { transmute(__lsx_vsat_hu(transmute(a), IMM4)) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsub_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsub_w(transmute(a), transmute(b))) } +pub fn lsx_vsat_wu(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM5, 5); + unsafe { transmute(__lsx_vsat_wu(transmute(a), IMM5)) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsub_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsub_d(transmute(a), transmute(b))) } +pub fn lsx_vsat_du(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM6, 6); + unsafe { transmute(__lsx_vsat_du(transmute(a), IMM6)) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsubi_bu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vsubi_bu(transmute(a), IMM5)) } +pub fn lsx_vadda_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vadda_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsubi_hu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vsubi_hu(transmute(a), IMM5)) } +pub fn lsx_vadda_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vadda_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsubi_wu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vsubi_wu(transmute(a), IMM5)) } +pub fn lsx_vadda_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vadda_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsubi_du(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vsubi_du(transmute(a), IMM5)) } +pub fn lsx_vadda_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vadda_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmax_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmax_b(transmute(a), transmute(b))) } +pub fn lsx_vsadd_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vsadd_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmax_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmax_h(transmute(a), transmute(b))) } +pub fn lsx_vsadd_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vsadd_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmax_w(a: m128i, b: m128i) -> m128i { - unsafe { 
transmute(__lsx_vmax_w(transmute(a), transmute(b))) } +pub fn lsx_vsadd_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vsadd_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmax_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmax_d(transmute(a), transmute(b))) } +pub fn lsx_vsadd_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vsadd_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmaxi_b(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vmaxi_b(transmute(a), IMM_S5)) } +pub fn lsx_vsadd_bu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vsadd_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmaxi_h(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vmaxi_h(transmute(a), IMM_S5)) } +pub fn lsx_vsadd_hu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vsadd_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmaxi_w(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vmaxi_w(transmute(a), IMM_S5)) } +pub fn lsx_vsadd_wu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vsadd_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmaxi_d(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { 
transmute(__lsx_vmaxi_d(transmute(a), IMM_S5)) } +pub fn lsx_vsadd_du(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vsadd_du(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmax_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmax_bu(transmute(a), transmute(b))) } +pub fn lsx_vavg_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavg_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmax_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmax_hu(transmute(a), transmute(b))) } +pub fn lsx_vavg_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavg_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmax_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmax_wu(transmute(a), transmute(b))) } +pub fn lsx_vavg_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavg_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmax_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmax_du(transmute(a), transmute(b))) } +pub fn lsx_vavg_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavg_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmaxi_bu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vmaxi_bu(transmute(a), IMM5)) } +pub fn lsx_vavg_bu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavg_bu(transmute(a), transmute(b))) } } #[inline(always)] 
#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmaxi_hu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vmaxi_hu(transmute(a), IMM5)) } +pub fn lsx_vavg_hu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavg_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmaxi_wu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vmaxi_wu(transmute(a), IMM5)) } +pub fn lsx_vavg_wu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavg_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmaxi_du(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vmaxi_du(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmin_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmin_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmin_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmin_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmin_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmin_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmin_d(a: m128i, b: m128i) -> m128i { - unsafe { 
transmute(__lsx_vmin_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmini_b(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vmini_b(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmini_h(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vmini_h(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmini_w(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vmini_w(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmini_d(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vmini_d(transmute(a), IMM_S5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmin_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmin_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmin_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmin_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmin_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmin_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] 
-#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmin_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmin_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmini_bu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vmini_bu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmini_hu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vmini_hu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmini_wu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vmini_wu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmini_du(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vmini_du(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vseq_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vseq_b(transmute(a), transmute(b))) } +pub fn lsx_vavg_du(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavg_du(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vseq_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vseq_h(transmute(a), transmute(b))) } +pub fn lsx_vavgr_b(a: 
m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavgr_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vseq_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vseq_w(transmute(a), transmute(b))) } +pub fn lsx_vavgr_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavgr_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vseq_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vseq_d(transmute(a), transmute(b))) } +pub fn lsx_vavgr_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavgr_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vseqi_b(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vseqi_b(transmute(a), IMM_S5)) } +pub fn lsx_vavgr_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavgr_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vseqi_h(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vseqi_h(transmute(a), IMM_S5)) } +pub fn lsx_vavgr_bu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavgr_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vseqi_w(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vseqi_w(transmute(a), IMM_S5)) } +pub fn lsx_vavgr_hu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavgr_hu(transmute(a), 
transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vseqi_d(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vseqi_d(transmute(a), IMM_S5)) } +pub fn lsx_vavgr_wu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavgr_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslti_b(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vslti_b(transmute(a), IMM_S5)) } +pub fn lsx_vavgr_du(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vavgr_du(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslt_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vslt_b(transmute(a), transmute(b))) } +pub fn lsx_vssub_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vssub_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslt_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vslt_h(transmute(a), transmute(b))) } +pub fn lsx_vssub_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vssub_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslt_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vslt_w(transmute(a), transmute(b))) } +pub fn lsx_vssub_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vssub_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn 
lsx_vslt_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vslt_d(transmute(a), transmute(b))) } +pub fn lsx_vssub_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vssub_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslti_h(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vslti_h(transmute(a), IMM_S5)) } +pub fn lsx_vssub_bu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vssub_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslti_w(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vslti_w(transmute(a), IMM_S5)) } +pub fn lsx_vssub_hu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vssub_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslti_d(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vslti_d(transmute(a), IMM_S5)) } +pub fn lsx_vssub_wu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vssub_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslt_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vslt_bu(transmute(a), transmute(b))) } +pub fn lsx_vssub_du(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vssub_du(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslt_hu(a: m128i, b: m128i) -> m128i { - unsafe { 
transmute(__lsx_vslt_hu(transmute(a), transmute(b))) } +pub fn lsx_vabsd_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vabsd_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslt_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vslt_wu(transmute(a), transmute(b))) } +pub fn lsx_vabsd_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vabsd_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslt_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vslt_du(transmute(a), transmute(b))) } +pub fn lsx_vabsd_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vabsd_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslti_bu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vslti_bu(transmute(a), IMM5)) } +pub fn lsx_vabsd_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vabsd_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslti_hu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vslti_hu(transmute(a), IMM5)) } +pub fn lsx_vabsd_bu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vabsd_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslti_wu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vslti_wu(transmute(a), IMM5)) } +pub fn lsx_vabsd_hu(a: 
m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vabsd_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslti_du(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vslti_du(transmute(a), IMM5)) } +pub fn lsx_vabsd_wu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vabsd_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsle_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsle_b(transmute(a), transmute(b))) } +pub fn lsx_vabsd_du(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vabsd_du(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsle_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsle_h(transmute(a), transmute(b))) } +pub fn lsx_vhaddw_h_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhaddw_h_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsle_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsle_w(transmute(a), transmute(b))) } +pub fn lsx_vhaddw_w_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhaddw_w_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsle_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsle_d(transmute(a), transmute(b))) } +pub fn lsx_vhaddw_d_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhaddw_d_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslei_b(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vslei_b(transmute(a), IMM_S5)) } +pub fn lsx_vhaddw_hu_bu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhaddw_hu_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslei_h(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vslei_h(transmute(a), IMM_S5)) } +pub fn lsx_vhaddw_wu_hu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhaddw_wu_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslei_w(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vslei_w(transmute(a), IMM_S5)) } +pub fn lsx_vhaddw_du_wu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhaddw_du_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslei_d(a: m128i) -> m128i { - static_assert_simm_bits!(IMM_S5, 5); - unsafe { transmute(__lsx_vslei_d(transmute(a), IMM_S5)) } +pub fn lsx_vhsubw_h_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhsubw_h_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsle_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsle_bu(transmute(a), transmute(b))) } +pub fn lsx_vhsubw_w_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhsubw_w_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsle_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsle_hu(transmute(a), transmute(b))) } +pub fn lsx_vhsubw_d_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhsubw_d_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsle_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsle_wu(transmute(a), transmute(b))) } +pub fn lsx_vhsubw_hu_bu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhsubw_hu_bu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsle_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsle_du(transmute(a), transmute(b))) } +pub fn lsx_vhsubw_wu_hu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhsubw_wu_hu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslei_bu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vslei_bu(transmute(a), IMM5)) } +pub fn lsx_vhsubw_du_wu(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vhsubw_du_wu(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslei_hu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vslei_hu(transmute(a), IMM5)) } +pub fn lsx_vreplve_b(a: m128i, b: i32) -> m128i { + unsafe { transmute(__lsx_vreplve_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn 
lsx_vslei_wu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vslei_wu(transmute(a), IMM5)) } +pub fn lsx_vreplve_h(a: m128i, b: i32) -> m128i { + unsafe { transmute(__lsx_vreplve_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vslei_du(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vslei_du(transmute(a), IMM5)) } +pub fn lsx_vreplve_w(a: m128i, b: i32) -> m128i { + unsafe { transmute(__lsx_vreplve_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsat_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lsx_vsat_b(transmute(a), IMM3)) } +pub fn lsx_vreplve_d(a: m128i, b: i32) -> m128i { + unsafe { transmute(__lsx_vreplve_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsat_h(a: m128i) -> m128i { +pub fn lsx_vreplvei_b(a: m128i) -> m128i { static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lsx_vsat_h(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsat_w(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vsat_w(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsat_d(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { transmute(__lsx_vsat_d(transmute(a), IMM6)) } + unsafe { 
transmute(__lsx_vreplvei_b(transmute(a), IMM4)) } } #[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsat_bu(a: m128i) -> m128i { +pub fn lsx_vreplvei_h(a: m128i) -> m128i { static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lsx_vsat_bu(transmute(a), IMM3)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsat_hu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lsx_vsat_hu(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsat_wu(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM5, 5); - unsafe { transmute(__lsx_vsat_wu(transmute(a), IMM5)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsat_du(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM6, 6); - unsafe { transmute(__lsx_vsat_du(transmute(a), IMM6)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vadda_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vadda_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vadda_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vadda_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vadda_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vadda_w(transmute(a), transmute(b))) } -} - -#[inline(always)] 
-#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vadda_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vadda_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsadd_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsadd_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsadd_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsadd_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsadd_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsadd_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsadd_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsadd_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsadd_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsadd_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsadd_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsadd_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vsadd_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsadd_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = 
"117427")] -pub fn lsx_vsadd_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vsadd_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavg_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavg_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavg_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavg_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavg_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavg_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavg_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavg_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavg_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavg_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavg_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavg_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavg_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavg_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavg_du(a: m128i, b: m128i) -> m128i { - unsafe { 
transmute(__lsx_vavg_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavgr_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavgr_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavgr_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavgr_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavgr_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavgr_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavgr_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavgr_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavgr_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavgr_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavgr_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavgr_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavgr_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavgr_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vavgr_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vavgr_du(transmute(a), transmute(b))) } -} - -#[inline(always)] 
-#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vssub_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vssub_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vssub_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vssub_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vssub_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vssub_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vssub_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vssub_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vssub_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vssub_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vssub_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vssub_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vssub_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vssub_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vssub_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vssub_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = 
"117427")] -pub fn lsx_vabsd_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vabsd_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vabsd_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vabsd_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vabsd_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vabsd_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vabsd_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vabsd_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vabsd_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vabsd_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vabsd_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vabsd_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vabsd_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vabsd_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vabsd_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vabsd_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmul_b(a: m128i, b: m128i) -> m128i { - unsafe { 
transmute(__lsx_vmul_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmul_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmul_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmul_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmul_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmul_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmul_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmadd_b(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vmadd_b(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmadd_h(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vmadd_h(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmadd_w(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vmadd_w(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmadd_d(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vmadd_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmsub_b(a: m128i, b: m128i, c: m128i) -> m128i { - 
unsafe { transmute(__lsx_vmsub_b(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmsub_h(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vmsub_h(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmsub_w(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vmsub_w(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmsub_d(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vmsub_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vdiv_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vdiv_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vdiv_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vdiv_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vdiv_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vdiv_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vdiv_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vdiv_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vdiv_bu(a: m128i, b: m128i) -> m128i { - unsafe { 
transmute(__lsx_vdiv_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vdiv_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vdiv_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vdiv_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vdiv_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vdiv_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vdiv_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhaddw_h_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhaddw_h_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhaddw_w_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhaddw_w_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhaddw_d_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhaddw_d_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhaddw_hu_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhaddw_hu_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhaddw_wu_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhaddw_wu_hu(transmute(a), transmute(b))) } -} 
- -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhaddw_du_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhaddw_du_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhsubw_h_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhsubw_h_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhsubw_w_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhsubw_w_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhsubw_d_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhsubw_d_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhsubw_hu_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhsubw_hu_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhsubw_wu_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhsubw_wu_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vhsubw_du_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vhsubw_du_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmod_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmod_b(transmute(a), transmute(b))) } -} - -#[inline(always)] 
-#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmod_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmod_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmod_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmod_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmod_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmod_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmod_bu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmod_bu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmod_hu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmod_hu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmod_wu(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmod_wu(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vmod_du(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vmod_du(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplve_b(a: m128i, b: i32) -> m128i { - unsafe { transmute(__lsx_vreplve_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] 
-pub fn lsx_vreplve_h(a: m128i, b: i32) -> m128i { - unsafe { transmute(__lsx_vreplve_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplve_w(a: m128i, b: i32) -> m128i { - unsafe { transmute(__lsx_vreplve_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplve_d(a: m128i, b: i32) -> m128i { - unsafe { transmute(__lsx_vreplve_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplvei_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lsx_vreplvei_b(transmute(a), IMM4)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplvei_h(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lsx_vreplvei_h(transmute(a), IMM3)) } + unsafe { transmute(__lsx_vreplvei_h(transmute(a), IMM3)) } } #[inline(always)] @@ -3386,583 +1946,307 @@ pub fn lsx_vreplvei_w(a: m128i) -> m128i { #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplvei_d(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM1, 1); - unsafe { transmute(__lsx_vreplvei_d(transmute(a), IMM1)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickev_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpickev_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] 
-pub fn lsx_vpickev_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpickev_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickev_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpickev_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickev_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpickev_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickod_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpickod_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickod_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpickod_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickod_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpickod_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickod_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpickod_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vilvh_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vilvh_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vilvh_h(a: m128i, b: m128i) -> m128i { - unsafe { 
transmute(__lsx_vilvh_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vilvh_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vilvh_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vilvh_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vilvh_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vilvl_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vilvl_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vilvl_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vilvl_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vilvl_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vilvl_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vilvl_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vilvl_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpackev_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpackev_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpackev_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpackev_h(transmute(a), transmute(b))) } -} - -#[inline(always)] 
-#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpackev_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpackev_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpackev_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpackev_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpackod_b(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpackod_b(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpackod_h(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpackod_h(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpackod_w(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpackod_w(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpackod_d(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vpackod_d(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vshuf_h(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vshuf_h(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vshuf_w(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vshuf_w(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] 
-#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vshuf_d(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vshuf_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vand_v(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vand_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vandi_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lsx_vandi_b(transmute(a), IMM8)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vor_v(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vor_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vori_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lsx_vori_b(transmute(a), IMM8)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vnor_v(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vnor_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vnori_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lsx_vnori_b(transmute(a), IMM8)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vxor_v(a: m128i, b: m128i) -> m128i { - unsafe { 
transmute(__lsx_vxor_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vxori_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lsx_vxori_b(transmute(a), IMM8)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vbitsel_v(a: m128i, b: m128i, c: m128i) -> m128i { - unsafe { transmute(__lsx_vbitsel_v(transmute(a), transmute(b), transmute(c))) } +pub fn lsx_vreplvei_d(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM1, 1); + unsafe { transmute(__lsx_vreplvei_d(transmute(a), IMM1)) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vbitseli_b(a: m128i, b: m128i) -> m128i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lsx_vbitseli_b(transmute(a), transmute(b), IMM8)) } +pub fn lsx_vpickev_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpickev_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vshuf4i_b(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lsx_vshuf4i_b(transmute(a), IMM8)) } +pub fn lsx_vpickev_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpickev_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vshuf4i_h(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lsx_vshuf4i_h(transmute(a), IMM8)) } +pub fn lsx_vpickev_w(a: m128i, b: m128i) -> m128i { + unsafe { 
transmute(__lsx_vpickev_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vshuf4i_w(a: m128i) -> m128i { - static_assert_uimm_bits!(IMM8, 8); - unsafe { transmute(__lsx_vshuf4i_w(transmute(a), IMM8)) } +pub fn lsx_vpickev_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpickev_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplgr2vr_b(a: i32) -> m128i { - unsafe { transmute(__lsx_vreplgr2vr_b(transmute(a))) } +pub fn lsx_vpickod_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpickod_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplgr2vr_h(a: i32) -> m128i { - unsafe { transmute(__lsx_vreplgr2vr_h(transmute(a))) } +pub fn lsx_vpickod_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpickod_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplgr2vr_w(a: i32) -> m128i { - unsafe { transmute(__lsx_vreplgr2vr_w(transmute(a))) } +pub fn lsx_vpickod_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpickod_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vreplgr2vr_d(a: i64) -> m128i { - unsafe { transmute(__lsx_vreplgr2vr_d(transmute(a))) } +pub fn lsx_vpickod_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpickod_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpcnt_b(a: m128i) -> m128i { - unsafe { 
transmute(__lsx_vpcnt_b(transmute(a))) } +pub fn lsx_vilvh_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vilvh_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpcnt_h(a: m128i) -> m128i { - unsafe { transmute(__lsx_vpcnt_h(transmute(a))) } +pub fn lsx_vilvh_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vilvh_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpcnt_w(a: m128i) -> m128i { - unsafe { transmute(__lsx_vpcnt_w(transmute(a))) } +pub fn lsx_vilvh_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vilvh_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpcnt_d(a: m128i) -> m128i { - unsafe { transmute(__lsx_vpcnt_d(transmute(a))) } +pub fn lsx_vilvh_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vilvh_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vclo_b(a: m128i) -> m128i { - unsafe { transmute(__lsx_vclo_b(transmute(a))) } +pub fn lsx_vilvl_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vilvl_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vclo_h(a: m128i) -> m128i { - unsafe { transmute(__lsx_vclo_h(transmute(a))) } +pub fn lsx_vilvl_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vilvl_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vclo_w(a: m128i) -> m128i { - unsafe { transmute(__lsx_vclo_w(transmute(a))) } +pub fn 
lsx_vilvl_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vilvl_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vclo_d(a: m128i) -> m128i { - unsafe { transmute(__lsx_vclo_d(transmute(a))) } +pub fn lsx_vilvl_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vilvl_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vclz_b(a: m128i) -> m128i { - unsafe { transmute(__lsx_vclz_b(transmute(a))) } +pub fn lsx_vpackev_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpackev_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vclz_h(a: m128i) -> m128i { - unsafe { transmute(__lsx_vclz_h(transmute(a))) } +pub fn lsx_vpackev_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpackev_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vclz_w(a: m128i) -> m128i { - unsafe { transmute(__lsx_vclz_w(transmute(a))) } +pub fn lsx_vpackev_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpackev_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vclz_d(a: m128i) -> m128i { - unsafe { transmute(__lsx_vclz_d(transmute(a))) } +pub fn lsx_vpackev_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpackev_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickve2gr_b(a: m128i) -> i32 { - static_assert_uimm_bits!(IMM4, 4); - unsafe { 
transmute(__lsx_vpickve2gr_b(transmute(a), IMM4)) } +pub fn lsx_vpackod_b(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpackod_b(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickve2gr_h(a: m128i) -> i32 { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lsx_vpickve2gr_h(transmute(a), IMM3)) } +pub fn lsx_vpackod_h(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpackod_h(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickve2gr_w(a: m128i) -> i32 { - static_assert_uimm_bits!(IMM2, 2); - unsafe { transmute(__lsx_vpickve2gr_w(transmute(a), IMM2)) } +pub fn lsx_vpackod_w(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpackod_w(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickve2gr_d(a: m128i) -> i64 { - static_assert_uimm_bits!(IMM1, 1); - unsafe { transmute(__lsx_vpickve2gr_d(transmute(a), IMM1)) } +pub fn lsx_vpackod_d(a: m128i, b: m128i) -> m128i { + unsafe { transmute(__lsx_vpackod_d(transmute(a), transmute(b))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickve2gr_bu(a: m128i) -> u32 { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lsx_vpickve2gr_bu(transmute(a), IMM4)) } +pub fn lsx_vshuf_h(a: m128i, b: m128i, c: m128i) -> m128i { + unsafe { transmute(__lsx_vshuf_h(transmute(a), transmute(b), transmute(c))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] -pub fn lsx_vpickve2gr_hu(a: m128i) -> u32 { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lsx_vpickve2gr_hu(transmute(a), IMM3)) } +pub fn lsx_vshuf_w(a: m128i, b: m128i, c: m128i) -> m128i { + unsafe { transmute(__lsx_vshuf_w(transmute(a), transmute(b), transmute(c))) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickve2gr_wu(a: m128i) -> u32 { - static_assert_uimm_bits!(IMM2, 2); - unsafe { transmute(__lsx_vpickve2gr_wu(transmute(a), IMM2)) } +pub fn lsx_vshuf_d(a: m128i, b: m128i, c: m128i) -> m128i { + unsafe { transmute(__lsx_vshuf_d(transmute(a), transmute(b), transmute(c))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vpickve2gr_du(a: m128i) -> u64 { - static_assert_uimm_bits!(IMM1, 1); - unsafe { transmute(__lsx_vpickve2gr_du(transmute(a), IMM1)) } +pub fn lsx_vandi_b(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lsx_vandi_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vinsgr2vr_b(a: m128i, b: i32) -> m128i { - static_assert_uimm_bits!(IMM4, 4); - unsafe { transmute(__lsx_vinsgr2vr_b(transmute(a), transmute(b), IMM4)) } +pub fn lsx_vori_b(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lsx_vori_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vinsgr2vr_h(a: m128i, b: i32) -> m128i { - static_assert_uimm_bits!(IMM3, 3); - unsafe { transmute(__lsx_vinsgr2vr_h(transmute(a), transmute(b), IMM3)) 
} +pub fn lsx_vnori_b(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lsx_vnori_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(2)] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vinsgr2vr_w(a: m128i, b: i32) -> m128i { - static_assert_uimm_bits!(IMM2, 2); - unsafe { transmute(__lsx_vinsgr2vr_w(transmute(a), transmute(b), IMM2)) } +pub fn lsx_vxori_b(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lsx_vxori_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vinsgr2vr_d(a: m128i, b: i64) -> m128i { - static_assert_uimm_bits!(IMM1, 1); - unsafe { transmute(__lsx_vinsgr2vr_d(transmute(a), transmute(b), IMM1)) } +pub fn lsx_vbitsel_v(a: m128i, b: m128i, c: m128i) -> m128i { + unsafe { transmute(__lsx_vbitsel_v(transmute(a), transmute(b), transmute(c))) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfadd_s(a: m128, b: m128) -> m128 { - unsafe { transmute(__lsx_vfadd_s(transmute(a), transmute(b))) } +pub fn lsx_vbitseli_b(a: m128i, b: m128i) -> m128i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lsx_vbitseli_b(transmute(a), transmute(b), IMM8)) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfadd_d(a: m128d, b: m128d) -> m128d { - unsafe { transmute(__lsx_vfadd_d(transmute(a), transmute(b))) } +pub fn lsx_vshuf4i_b(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lsx_vshuf4i_b(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lsx")] 
+#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfsub_s(a: m128, b: m128) -> m128 { - unsafe { transmute(__lsx_vfsub_s(transmute(a), transmute(b))) } +pub fn lsx_vshuf4i_h(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lsx_vshuf4i_h(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lsx")] +#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfsub_d(a: m128d, b: m128d) -> m128d { - unsafe { transmute(__lsx_vfsub_d(transmute(a), transmute(b))) } +pub fn lsx_vshuf4i_w(a: m128i) -> m128i { + static_assert_uimm_bits!(IMM8, 8); + unsafe { transmute(__lsx_vshuf4i_w(transmute(a), IMM8)) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfmul_s(a: m128, b: m128) -> m128 { - unsafe { transmute(__lsx_vfmul_s(transmute(a), transmute(b))) } +pub fn lsx_vclo_b(a: m128i) -> m128i { + unsafe { transmute(__lsx_vclo_b(transmute(a))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfmul_d(a: m128d, b: m128d) -> m128d { - unsafe { transmute(__lsx_vfmul_d(transmute(a), transmute(b))) } +pub fn lsx_vclo_h(a: m128i) -> m128i { + unsafe { transmute(__lsx_vclo_h(transmute(a))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfdiv_s(a: m128, b: m128) -> m128 { - unsafe { transmute(__lsx_vfdiv_s(transmute(a), transmute(b))) } +pub fn lsx_vclo_w(a: m128i) -> m128i { + unsafe { transmute(__lsx_vclo_w(transmute(a))) } } #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfdiv_d(a: m128d, b: m128d) -> m128d { - unsafe { transmute(__lsx_vfdiv_d(transmute(a), transmute(b))) } +pub fn lsx_vclo_d(a: m128i) -> m128i { + 
unsafe { transmute(__lsx_vclo_d(transmute(a))) } } #[inline(always)] @@ -4049,20 +2333,6 @@ pub fn lsx_vfclass_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vfclass_d(transmute(a))) } } -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfsqrt_s(a: m128) -> m128 { - unsafe { transmute(__lsx_vfsqrt_s(transmute(a))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfsqrt_d(a: m128d) -> m128d { - unsafe { transmute(__lsx_vfsqrt_d(transmute(a))) } -} - #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4259,41 +2529,6 @@ pub fn lsx_vffint_d_lu(a: m128i) -> m128d { unsafe { transmute(__lsx_vffint_d_lu(transmute(a))) } } -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vandn_v(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vandn_v(transmute(a), transmute(b))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vneg_b(a: m128i) -> m128i { - unsafe { transmute(__lsx_vneg_b(transmute(a))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vneg_h(a: m128i) -> m128i { - unsafe { transmute(__lsx_vneg_h(transmute(a))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vneg_w(a: m128i) -> m128i { - unsafe { transmute(__lsx_vneg_w(transmute(a))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vneg_d(a: m128i) -> m128i { - unsafe { transmute(__lsx_vneg_d(transmute(a))) } -} - #[inline(always)] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4765,62 +3000,6 @@ pub fn lsx_vsigncov_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsigncov_d(transmute(a), transmute(b))) } } -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfmadd_s(a: m128, b: m128, c: m128) -> m128 { - unsafe { transmute(__lsx_vfmadd_s(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfmadd_d(a: m128d, b: m128d, c: m128d) -> m128d { - unsafe { transmute(__lsx_vfmadd_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfmsub_s(a: m128, b: m128, c: m128) -> m128 { - unsafe { transmute(__lsx_vfmsub_s(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfmsub_d(a: m128d, b: m128d, c: m128d) -> m128d { - unsafe { transmute(__lsx_vfmsub_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfnmadd_s(a: m128, b: m128, c: m128) -> m128 { - unsafe { transmute(__lsx_vfnmadd_s(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfnmadd_d(a: m128d, b: m128d, c: m128d) -> m128d { - unsafe { transmute(__lsx_vfnmadd_d(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfnmsub_s(a: m128, b: m128, c: m128) -> m128 { - unsafe { 
transmute(__lsx_vfnmsub_s(transmute(a), transmute(b), transmute(c))) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vfnmsub_d(a: m128d, b: m128d, c: m128d) -> m128d { - unsafe { transmute(__lsx_vfnmsub_d(transmute(a), transmute(b), transmute(c))) } -} - #[inline(always)] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -6421,13 +4600,6 @@ pub fn lsx_vssrln_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_w_d(transmute(a), transmute(b))) } } -#[inline(always)] -#[target_feature(enable = "lsx")] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vorn_v(a: m128i, b: m128i) -> m128i { - unsafe { transmute(__lsx_vorn_v(transmute(a), transmute(b))) } -} - #[inline(always)] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] @@ -6842,39 +5014,3 @@ pub fn lsx_vfcmp_sune_s(a: m128, b: m128) -> m128i { pub fn lsx_vfcmp_sun_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sun_s(transmute(a), transmute(b))) } } - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(0)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vrepli_b() -> m128i { - static_assert_simm_bits!(IMM_S10, 10); - unsafe { transmute(__lsx_vrepli_b(IMM_S10)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(0)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vrepli_d() -> m128i { - static_assert_simm_bits!(IMM_S10, 10); - unsafe { transmute(__lsx_vrepli_d(IMM_S10)) } -} - -#[inline(always)] -#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(0)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vrepli_h() -> m128i { - static_assert_simm_bits!(IMM_S10, 10); - unsafe { transmute(__lsx_vrepli_h(IMM_S10)) } -} - -#[inline(always)] 
-#[target_feature(enable = "lsx")] -#[rustc_legacy_const_generics(0)] -#[unstable(feature = "stdarch_loongarch", issue = "117427")] -pub fn lsx_vrepli_w() -> m128i { - static_assert_simm_bits!(IMM_S10, 10); - unsafe { transmute(__lsx_vrepli_w(IMM_S10)) } -} diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/mod.rs index 67a08985a9637..0d353746ea859 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/mod.rs @@ -16,6 +16,13 @@ mod generated; #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub use self::generated::*; +#[rustfmt::skip] +mod portable; + +#[rustfmt::skip] +#[unstable(feature = "stdarch_loongarch", issue = "117427")] +pub use self::portable::*; + #[rustfmt::skip] #[cfg(test)] mod tests; diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/portable.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/portable.rs new file mode 100644 index 0000000000000..e33b1758f3111 --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/portable.rs @@ -0,0 +1,207 @@ +//! 
LoongArch64 LSX intrinsics - intrinsics::simd implementation + +use super::super::{simd::*, *}; +use crate::core_arch::simd::*; +use crate::intrinsics::simd::*; +use crate::mem::transmute; + +impl_vv!("lsx", lsx_vpcnt_b, simd_ctpop, m128i, i8x16); +impl_vv!("lsx", lsx_vpcnt_h, simd_ctpop, m128i, i16x8); +impl_vv!("lsx", lsx_vpcnt_w, simd_ctpop, m128i, i32x4); +impl_vv!("lsx", lsx_vpcnt_d, simd_ctpop, m128i, i64x2); +impl_vv!("lsx", lsx_vclz_b, simd_ctlz, m128i, i8x16); +impl_vv!("lsx", lsx_vclz_h, simd_ctlz, m128i, i16x8); +impl_vv!("lsx", lsx_vclz_w, simd_ctlz, m128i, i32x4); +impl_vv!("lsx", lsx_vclz_d, simd_ctlz, m128i, i64x2); +impl_vv!("lsx", lsx_vneg_b, simd_neg, m128i, i8x16); +impl_vv!("lsx", lsx_vneg_h, simd_neg, m128i, i16x8); +impl_vv!("lsx", lsx_vneg_w, simd_neg, m128i, i32x4); +impl_vv!("lsx", lsx_vneg_d, simd_neg, m128i, i64x2); +impl_vv!("lsx", lsx_vfsqrt_s, simd_fsqrt, m128, f32x4); +impl_vv!("lsx", lsx_vfsqrt_d, simd_fsqrt, m128d, f64x2); + +impl_gv!("lsx", lsx_vreplgr2vr_b, simdl_splat, m128i, i8x16, i32); +impl_gv!("lsx", lsx_vreplgr2vr_h, simdl_splat, m128i, i16x8, i32); +impl_gv!("lsx", lsx_vreplgr2vr_w, simdl_splat, m128i, i32x4, i32); +impl_gv!("lsx", lsx_vreplgr2vr_d, simdl_splat, m128i, i64x2, i64); + +impl_sv!("lsx", lsx_vrepli_b, simdl_splat, m128i, i8x16, 10); +impl_sv!("lsx", lsx_vrepli_h, simdl_splat, m128i, i16x8, 10); +impl_sv!("lsx", lsx_vrepli_w, simdl_splat, m128i, i32x4, 10); +impl_sv!("lsx", lsx_vrepli_d, simdl_splat, m128i, i64x2, 10); + +impl_vvv!("lsx", lsx_vadd_b, simd_add, m128i, i8x16); +impl_vvv!("lsx", lsx_vadd_h, simd_add, m128i, i16x8); +impl_vvv!("lsx", lsx_vadd_w, simd_add, m128i, i32x4); +impl_vvv!("lsx", lsx_vadd_d, simd_add, m128i, i64x2); +impl_vvv!("lsx", lsx_vsub_b, simd_sub, m128i, i8x16); +impl_vvv!("lsx", lsx_vsub_h, simd_sub, m128i, i16x8); +impl_vvv!("lsx", lsx_vsub_w, simd_sub, m128i, i32x4); +impl_vvv!("lsx", lsx_vsub_d, simd_sub, m128i, i64x2); +impl_vvv!("lsx", lsx_vmax_b, simd_imax, m128i, i8x16); 
+impl_vvv!("lsx", lsx_vmax_h, simd_imax, m128i, i16x8); +impl_vvv!("lsx", lsx_vmax_w, simd_imax, m128i, i32x4); +impl_vvv!("lsx", lsx_vmax_d, simd_imax, m128i, i64x2); +impl_vvv!("lsx", lsx_vmax_bu, simd_imax, m128i, u8x16); +impl_vvv!("lsx", lsx_vmax_hu, simd_imax, m128i, u16x8); +impl_vvv!("lsx", lsx_vmax_wu, simd_imax, m128i, u32x4); +impl_vvv!("lsx", lsx_vmax_du, simd_imax, m128i, u64x2); +impl_vvv!("lsx", lsx_vmin_b, simd_imin, m128i, i8x16); +impl_vvv!("lsx", lsx_vmin_h, simd_imin, m128i, i16x8); +impl_vvv!("lsx", lsx_vmin_w, simd_imin, m128i, i32x4); +impl_vvv!("lsx", lsx_vmin_d, simd_imin, m128i, i64x2); +impl_vvv!("lsx", lsx_vmin_bu, simd_imin, m128i, u8x16); +impl_vvv!("lsx", lsx_vmin_hu, simd_imin, m128i, u16x8); +impl_vvv!("lsx", lsx_vmin_wu, simd_imin, m128i, u32x4); +impl_vvv!("lsx", lsx_vmin_du, simd_imin, m128i, u64x2); +impl_vvv!("lsx", lsx_vseq_b, simd_eq, m128i, i8x16); +impl_vvv!("lsx", lsx_vseq_h, simd_eq, m128i, i16x8); +impl_vvv!("lsx", lsx_vseq_w, simd_eq, m128i, i32x4); +impl_vvv!("lsx", lsx_vseq_d, simd_eq, m128i, i64x2); +impl_vvv!("lsx", lsx_vslt_b, simd_lt, m128i, i8x16); +impl_vvv!("lsx", lsx_vslt_h, simd_lt, m128i, i16x8); +impl_vvv!("lsx", lsx_vslt_w, simd_lt, m128i, i32x4); +impl_vvv!("lsx", lsx_vslt_d, simd_lt, m128i, i64x2); +impl_vvv!("lsx", lsx_vslt_bu, simd_lt, m128i, u8x16); +impl_vvv!("lsx", lsx_vslt_hu, simd_lt, m128i, u16x8); +impl_vvv!("lsx", lsx_vslt_wu, simd_lt, m128i, u32x4); +impl_vvv!("lsx", lsx_vslt_du, simd_lt, m128i, u64x2); +impl_vvv!("lsx", lsx_vsle_b, simd_le, m128i, i8x16); +impl_vvv!("lsx", lsx_vsle_h, simd_le, m128i, i16x8); +impl_vvv!("lsx", lsx_vsle_w, simd_le, m128i, i32x4); +impl_vvv!("lsx", lsx_vsle_d, simd_le, m128i, i64x2); +impl_vvv!("lsx", lsx_vsle_bu, simd_le, m128i, u8x16); +impl_vvv!("lsx", lsx_vsle_hu, simd_le, m128i, u16x8); +impl_vvv!("lsx", lsx_vsle_wu, simd_le, m128i, u32x4); +impl_vvv!("lsx", lsx_vsle_du, simd_le, m128i, u64x2); +impl_vvv!("lsx", lsx_vmul_b, simd_mul, m128i, i8x16); 
+impl_vvv!("lsx", lsx_vmul_h, simd_mul, m128i, i16x8); +impl_vvv!("lsx", lsx_vmul_w, simd_mul, m128i, i32x4); +impl_vvv!("lsx", lsx_vmul_d, simd_mul, m128i, i64x2); +impl_vvv!("lsx", lsx_vdiv_b, simd_div, m128i, i8x16); +impl_vvv!("lsx", lsx_vdiv_h, simd_div, m128i, i16x8); +impl_vvv!("lsx", lsx_vdiv_w, simd_div, m128i, i32x4); +impl_vvv!("lsx", lsx_vdiv_d, simd_div, m128i, i64x2); +impl_vvv!("lsx", lsx_vdiv_bu, simd_div, m128i, u8x16); +impl_vvv!("lsx", lsx_vdiv_hu, simd_div, m128i, u16x8); +impl_vvv!("lsx", lsx_vdiv_wu, simd_div, m128i, u32x4); +impl_vvv!("lsx", lsx_vdiv_du, simd_div, m128i, u64x2); +impl_vvv!("lsx", lsx_vmod_b, simd_rem, m128i, i8x16); +impl_vvv!("lsx", lsx_vmod_h, simd_rem, m128i, i16x8); +impl_vvv!("lsx", lsx_vmod_w, simd_rem, m128i, i32x4); +impl_vvv!("lsx", lsx_vmod_d, simd_rem, m128i, i64x2); +impl_vvv!("lsx", lsx_vmod_bu, simd_rem, m128i, u8x16); +impl_vvv!("lsx", lsx_vmod_hu, simd_rem, m128i, u16x8); +impl_vvv!("lsx", lsx_vmod_wu, simd_rem, m128i, u32x4); +impl_vvv!("lsx", lsx_vmod_du, simd_rem, m128i, u64x2); +impl_vvv!("lsx", lsx_vand_v, simd_and, m128i, u8x16); +impl_vvv!("lsx", lsx_vandn_v, simdl_andn, m128i, u8x16); +impl_vvv!("lsx", lsx_vor_v, simd_or, m128i, u8x16); +impl_vvv!("lsx", lsx_vorn_v, simdl_orn, m128i, u8x16); +impl_vvv!("lsx", lsx_vnor_v, simdl_nor, m128i, u8x16); +impl_vvv!("lsx", lsx_vxor_v, simd_xor, m128i, u8x16); +impl_vvv!("lsx", lsx_vfadd_s, simd_add, m128, f32x4); +impl_vvv!("lsx", lsx_vfadd_d, simd_add, m128d, f64x2); +impl_vvv!("lsx", lsx_vfsub_s, simd_sub, m128, f32x4); +impl_vvv!("lsx", lsx_vfsub_d, simd_sub, m128d, f64x2); +impl_vvv!("lsx", lsx_vfmul_s, simd_mul, m128, f32x4); +impl_vvv!("lsx", lsx_vfmul_d, simd_mul, m128d, f64x2); +impl_vvv!("lsx", lsx_vfdiv_s, simd_div, m128, f32x4); +impl_vvv!("lsx", lsx_vfdiv_d, simd_div, m128d, f64x2); +impl_vvv!("lsx", lsx_vsll_b, simdl_shl, m128i, i8x16); +impl_vvv!("lsx", lsx_vsll_h, simdl_shl, m128i, i16x8); +impl_vvv!("lsx", lsx_vsll_w, simdl_shl, m128i, i32x4); 
+impl_vvv!("lsx", lsx_vsll_d, simdl_shl, m128i, i64x2); +impl_vvv!("lsx", lsx_vsra_b, simdl_shr, m128i, i8x16); +impl_vvv!("lsx", lsx_vsra_h, simdl_shr, m128i, i16x8); +impl_vvv!("lsx", lsx_vsra_w, simdl_shr, m128i, i32x4); +impl_vvv!("lsx", lsx_vsra_d, simdl_shr, m128i, i64x2); +impl_vvv!("lsx", lsx_vsrl_b, simdl_shr, m128i, u8x16); +impl_vvv!("lsx", lsx_vsrl_h, simdl_shr, m128i, u16x8); +impl_vvv!("lsx", lsx_vsrl_w, simdl_shr, m128i, u32x4); +impl_vvv!("lsx", lsx_vsrl_d, simdl_shr, m128i, u64x2); + +impl_vuv!("lsx", lsx_vslli_b, simd_shl, m128i, i8x16); +impl_vuv!("lsx", lsx_vslli_h, simd_shl, m128i, i16x8); +impl_vuv!("lsx", lsx_vslli_w, simd_shl, m128i, i32x4); +impl_vuv!("lsx", lsx_vslli_d, simd_shl, m128i, i64x2); +impl_vuv!("lsx", lsx_vsrai_b, simd_shr, m128i, i8x16); +impl_vuv!("lsx", lsx_vsrai_h, simd_shr, m128i, i16x8); +impl_vuv!("lsx", lsx_vsrai_w, simd_shr, m128i, i32x4); +impl_vuv!("lsx", lsx_vsrai_d, simd_shr, m128i, i64x2); +impl_vuv!("lsx", lsx_vsrli_b, simd_shr, m128i, u8x16); +impl_vuv!("lsx", lsx_vsrli_h, simd_shr, m128i, u16x8); +impl_vuv!("lsx", lsx_vsrli_w, simd_shr, m128i, u32x4); +impl_vuv!("lsx", lsx_vsrli_d, simd_shr, m128i, u64x2); +impl_vuv!("lsx", lsx_vaddi_bu, simd_add, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vaddi_hu, simd_add, m128i, u16x8, 5); +impl_vuv!("lsx", lsx_vaddi_wu, simd_add, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vaddi_du, simd_add, m128i, u64x2, 5); +impl_vuv!("lsx", lsx_vslti_bu, simd_lt, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vslti_hu, simd_lt, m128i, u16x8, 5); +impl_vuv!("lsx", lsx_vslti_wu, simd_lt, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vslti_du, simd_lt, m128i, u64x2, 5); +impl_vuv!("lsx", lsx_vslei_bu, simd_le, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vslei_hu, simd_le, m128i, u16x8, 5); +impl_vuv!("lsx", lsx_vslei_wu, simd_le, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vslei_du, simd_le, m128i, u64x2, 5); +impl_vuv!("lsx", lsx_vmaxi_bu, simd_imax, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vmaxi_hu, simd_imax, 
m128i, u16x8, 5); +impl_vuv!("lsx", lsx_vmaxi_wu, simd_imax, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vmaxi_du, simd_imax, m128i, u64x2, 5); +impl_vuv!("lsx", lsx_vmini_bu, simd_imin, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vmini_hu, simd_imin, m128i, u16x8, 5); +impl_vuv!("lsx", lsx_vmini_wu, simd_imin, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vmini_du, simd_imin, m128i, u64x2, 5); + +impl_vug!("lsx", lsx_vpickve2gr_b, simd_extract, m128i, i8x16, i32, 4); +impl_vug!("lsx", lsx_vpickve2gr_h, simd_extract, m128i, i16x8, i32, 3); +impl_vug!("lsx", lsx_vpickve2gr_w, simd_extract, m128i, i32x4, i32, 2); +impl_vug!("lsx", lsx_vpickve2gr_d, simd_extract, m128i, i64x2, i64, 1); +impl_vug!("lsx", lsx_vpickve2gr_bu, simd_extract, m128i, u8x16, u32, 4); +impl_vug!("lsx", lsx_vpickve2gr_hu, simd_extract, m128i, u16x8, u32, 3); +impl_vug!("lsx", lsx_vpickve2gr_wu, simd_extract, m128i, u32x4, u32, 2); +impl_vug!("lsx", lsx_vpickve2gr_du, simd_extract, m128i, u64x2, u64, 1); + +impl_vsv!("lsx", lsx_vseqi_b, simd_eq, m128i, i8x16, 5); +impl_vsv!("lsx", lsx_vseqi_h, simd_eq, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vseqi_w, simd_eq, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vseqi_d, simd_eq, m128i, i64x2, 5); +impl_vsv!("lsx", lsx_vslti_b, simd_lt, m128i, i8x16, 5); +impl_vsv!("lsx", lsx_vslti_h, simd_lt, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vslti_w, simd_lt, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vslti_d, simd_lt, m128i, i64x2, 5); +impl_vsv!("lsx", lsx_vslei_b, simd_le, m128i, i8x16, 5); +impl_vsv!("lsx", lsx_vslei_h, simd_le, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vslei_w, simd_le, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vslei_d, simd_le, m128i, i64x2, 5); +impl_vsv!("lsx", lsx_vmaxi_b, simd_imax, m128i, i8x16, 5); +impl_vsv!("lsx", lsx_vmaxi_h, simd_imax, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vmaxi_w, simd_imax, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vmaxi_d, simd_imax, m128i, i64x2, 5); +impl_vsv!("lsx", lsx_vmini_b, simd_imin, m128i, i8x16, 5); +impl_vsv!("lsx", 
lsx_vmini_h, simd_imin, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vmini_w, simd_imin, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vmini_d, simd_imin, m128i, i64x2, 5); + +impl_vvvv!("lsx", lsx_vmadd_b, simdl_madd, m128i, i8x16); +impl_vvvv!("lsx", lsx_vmadd_h, simdl_madd, m128i, i16x8); +impl_vvvv!("lsx", lsx_vmadd_w, simdl_madd, m128i, i32x4); +impl_vvvv!("lsx", lsx_vmadd_d, simdl_madd, m128i, i64x2); +impl_vvvv!("lsx", lsx_vmsub_b, simdl_msub, m128i, i8x16); +impl_vvvv!("lsx", lsx_vmsub_h, simdl_msub, m128i, i16x8); +impl_vvvv!("lsx", lsx_vmsub_w, simdl_msub, m128i, i32x4); +impl_vvvv!("lsx", lsx_vmsub_d, simdl_msub, m128i, i64x2); +impl_vvvv!("lsx", lsx_vfmadd_s, simd_fma, m128, f32x4); +impl_vvvv!("lsx", lsx_vfmadd_d, simd_fma, m128d, f64x2); +impl_vvvv!("lsx", lsx_vfmsub_s, simdl_fms, m128, f32x4); +impl_vvvv!("lsx", lsx_vfmsub_d, simdl_fms, m128d, f64x2); +impl_vvvv!("lsx", lsx_vfnmadd_s, simdl_nfma, m128, f32x4); +impl_vvvv!("lsx", lsx_vfnmadd_d, simdl_nfma, m128d, f64x2); +impl_vvvv!("lsx", lsx_vfnmsub_s, simdl_nfms, m128, f32x4); +impl_vvvv!("lsx", lsx_vfnmsub_d, simdl_nfms, m128d, f64x2); + +impl_vugv!("lsx", lsx_vinsgr2vr_b, simd_insert, m128i, i8x16, i32, 4); +impl_vugv!("lsx", lsx_vinsgr2vr_h, simd_insert, m128i, i16x8, i32, 3); +impl_vugv!("lsx", lsx_vinsgr2vr_w, simd_insert, m128i, i32x4, i32, 2); +impl_vugv!("lsx", lsx_vinsgr2vr_d, simd_insert, m128i, i64x2, i64, 1); diff --git a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs index 41c21aac2a574..f464dbd356b7f 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/mod.rs @@ -2,6 +2,7 @@ mod lasx; mod lsx; +mod simd; #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub use self::lasx::*; diff --git a/library/stdarch/crates/core_arch/src/loongarch64/simd.rs b/library/stdarch/crates/core_arch/src/loongarch64/simd.rs new file mode 100644 index 
0000000000000..ac98d6ac5dcda --- /dev/null +++ b/library/stdarch/crates/core_arch/src/loongarch64/simd.rs @@ -0,0 +1,302 @@ +//! LoongArch64 SIMD helpers + +pub(super) const trait SimdL: Sized { + type Elem; + + unsafe fn splat(v: i64) -> Self; +} + +macro_rules! impl_simdl { + ($v:ident, $e:ty) => { + #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] + impl const SimdL for crate::core_arch::simd::$v { + type Elem = $e; + + #[inline(always)] + unsafe fn splat(v: i64) -> Self { + crate::intrinsics::simd::simd_splat(v as Self::Elem) + } + } + }; +} + +impl_simdl!(i8x16, i8); +impl_simdl!(i8x32, i8); +impl_simdl!(u8x16, u8); +impl_simdl!(u8x32, u8); +impl_simdl!(i16x8, i16); +impl_simdl!(i16x16, i16); +impl_simdl!(u16x8, u16); +impl_simdl!(u16x16, u16); +impl_simdl!(i32x4, i32); +impl_simdl!(i32x8, i32); +impl_simdl!(u32x4, u32); +impl_simdl!(u32x8, u32); +impl_simdl!(i64x2, i64); +impl_simdl!(i64x4, i64); +impl_simdl!(u64x2, u64); +impl_simdl!(u64x4, u64); + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_andn(a: T, b: T) -> T { + crate::intrinsics::simd::simd_and(simdl_not(a), b) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_fms(a: T, b: T, c: T) -> T { + let c: T = crate::intrinsics::simd::simd_neg(c); + crate::intrinsics::simd::simd_fma(a, b, c) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_madd(a: T, b: T, c: T) -> T { + let mul: T = crate::intrinsics::simd::simd_mul(b, c); + crate::intrinsics::simd::simd_add(mul, a) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_msub(a: T, b: T, c: T) -> T { + let mul: T = crate::intrinsics::simd::simd_mul(b, c); + crate::intrinsics::simd::simd_sub(a, mul) +} + 
+#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_nfma(a: T, b: T, c: T) -> T { + let fma: T = crate::intrinsics::simd::simd_fma(a, b, c); + crate::intrinsics::simd::simd_neg(fma) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_nfms(a: T, b: T, c: T) -> T { + let fma: T = simdl_fms(a, b, c); + crate::intrinsics::simd::simd_neg(fma) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_nor(a: T, b: T) -> T { + let or: T = crate::intrinsics::simd::simd_or(a, b); + simdl_not(or) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_not(a: T) -> T { + let not: T = simdl_splat(!0); + crate::intrinsics::simd::simd_xor(a, not) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_orn(a: T, b: T) -> T { + crate::intrinsics::simd::simd_or(a, simdl_not(b)) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_shl(a: T, b: T) -> T { + let m: T = simdl_splat((size_of::() * 8 - 1) as i64); + let b: T = crate::intrinsics::simd::simd_and(b, m); + crate::intrinsics::simd::simd_shl(a, b) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_shr(a: T, b: T) -> T { + let m: T = simdl_splat((size_of::() * 8 - 1) as i64); + let b: T = crate::intrinsics::simd::simd_and(b, m); + crate::intrinsics::simd::simd_shr(a, b) +} + +#[inline(always)] +#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] +pub(super) const unsafe fn simdl_splat(a: i64) -> T { + T::splat(a) +} + +macro_rules! 
impl_vv { + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ty) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name(a: $oty) -> $oty { + unsafe { + let a: $ity = transmute(a); + let r: $ity = $op(a); + transmute(r) + } + } + }; +} + +pub(super) use impl_vv; + +macro_rules! impl_gv { + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $gty:ty) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name(a: $gty) -> $oty { + unsafe { + let r: $ity = $op(a.into()); + transmute(r) + } + } + }; +} + +pub(super) use impl_gv; + +macro_rules! impl_sv { + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $ibs:expr) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[rustc_legacy_const_generics(0)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name() -> $oty { + static_assert_simm_bits!(IMM, $ibs); + unsafe { + let r: $ity = $op(IMM.into()); + transmute(r) + } + } + }; +} + +pub(super) use impl_sv; + +macro_rules! impl_vvv { + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ty) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name(a: $oty, b: $oty) -> $oty { + unsafe { + let a: $ity = transmute(a); + let b: $ity = transmute(b); + let r: $ity = $op(a, b); + transmute(r) + } + } + }; +} + +pub(super) use impl_vvv; + +macro_rules! 
impl_vuv { + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name(a: $oty) -> $oty { + static_assert_uimm_bits!(IMM, (size_of::<<$ity as SimdL>::Elem>() * 8).ilog2()); + unsafe { + let a: $ity = transmute(a); + let b: $ity = simdl_splat(IMM.into()); + let r: $ity = $op(a, b); + transmute(r) + } + } + }; + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $ibs:expr) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name(a: $oty) -> $oty { + static_assert_uimm_bits!(IMM, $ibs); + unsafe { + let a: $ity = transmute(a); + let b: $ity = simdl_splat(IMM.into()); + let r: $ity = $op(a, b); + transmute(r) + } + } + }; +} + +pub(super) use impl_vuv; + +macro_rules! impl_vug { + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $gty:ty, $ibs:expr) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name(a: $oty) -> $gty { + static_assert_uimm_bits!(IMM, $ibs); + unsafe { + let a: $ity = transmute(a); + let r: <$ity as SimdL>::Elem = $op(a, IMM); + r as $gty + } + } + }; +} + +pub(super) use impl_vug; + +macro_rules! impl_vsv { + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $ibs:expr) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name(a: $oty) -> $oty { + static_assert_simm_bits!(IMM, $ibs); + unsafe { + let a: $ity = transmute(a); + let b: $ity = simdl_splat(IMM.into()); + let r: $ity = $op(a, b); + transmute(r) + } + } + }; +} + +pub(super) use impl_vsv; + +macro_rules! 
impl_vvvv { + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ty) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name(a: $oty, b: $oty, c: $oty) -> $oty { + unsafe { + let a: $ity = transmute(a); + let b: $ity = transmute(b); + let c: $ity = transmute(c); + let r: $ity = $op(a, b, c); + transmute(r) + } + } + }; +} + +pub(super) use impl_vvvv; + +macro_rules! impl_vugv { + ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $gty:ty, $ibs:expr) => { + #[inline(always)] + #[target_feature(enable = $ft)] + #[rustc_legacy_const_generics(1)] + #[unstable(feature = "stdarch_loongarch", issue = "117427")] + pub fn $name(a: $oty, b: $gty) -> $oty { + static_assert_uimm_bits!(IMM, $ibs); + unsafe { + let a: $ity = transmute(a); + let r: $ity = $op(a, IMM, b as <$ity as SimdL>::Elem); + transmute(r) + } + } + }; +} + +pub(super) use impl_vugv; diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec b/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec index ac4203a03f207..9a9b1a143eff0 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec +++ b/library/stdarch/crates/stdarch-gen-loongarch/lasx.spec @@ -4,81 +4,97 @@ // ``` /// lasx_xvsll_b +impl = portable name = lasx_xvsll_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvsll_h +impl = portable name = lasx_xvsll_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvsll_w +impl = portable name = lasx_xvsll_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvsll_d +impl = portable name = lasx_xvsll_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvslli_b +impl = portable name = lasx_xvslli_b asm-fmts = xd, xj, ui3 data-types = V32QI, V32QI, UQI /// lasx_xvslli_h +impl = portable name = lasx_xvslli_h asm-fmts = xd, xj, ui4 data-types = V16HI, V16HI, UQI /// lasx_xvslli_w +impl = portable name = lasx_xvslli_w asm-fmts = xd, xj, ui5 data-types = 
V8SI, V8SI, UQI /// lasx_xvslli_d +impl = portable name = lasx_xvslli_d asm-fmts = xd, xj, ui6 data-types = V4DI, V4DI, UQI /// lasx_xvsra_b +impl = portable name = lasx_xvsra_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvsra_h +impl = portable name = lasx_xvsra_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvsra_w +impl = portable name = lasx_xvsra_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvsra_d +impl = portable name = lasx_xvsra_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvsrai_b +impl = portable name = lasx_xvsrai_b asm-fmts = xd, xj, ui3 data-types = V32QI, V32QI, UQI /// lasx_xvsrai_h +impl = portable name = lasx_xvsrai_h asm-fmts = xd, xj, ui4 data-types = V16HI, V16HI, UQI /// lasx_xvsrai_w +impl = portable name = lasx_xvsrai_w asm-fmts = xd, xj, ui5 data-types = V8SI, V8SI, UQI /// lasx_xvsrai_d +impl = portable name = lasx_xvsrai_d asm-fmts = xd, xj, ui6 data-types = V4DI, V4DI, UQI @@ -124,41 +140,49 @@ asm-fmts = xd, xj, ui6 data-types = V4DI, V4DI, UQI /// lasx_xvsrl_b +impl = portable name = lasx_xvsrl_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvsrl_h +impl = portable name = lasx_xvsrl_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvsrl_w +impl = portable name = lasx_xvsrl_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvsrl_d +impl = portable name = lasx_xvsrl_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvsrli_b +impl = portable name = lasx_xvsrli_b asm-fmts = xd, xj, ui3 data-types = V32QI, V32QI, UQI /// lasx_xvsrli_h +impl = portable name = lasx_xvsrli_h asm-fmts = xd, xj, ui4 data-types = V16HI, V16HI, UQI /// lasx_xvsrli_w +impl = portable name = lasx_xvsrli_w asm-fmts = xd, xj, ui5 data-types = V8SI, V8SI, UQI /// lasx_xvsrli_d +impl = portable name = lasx_xvsrli_d asm-fmts = xd, xj, ui6 data-types = V4DI, V4DI, UQI @@ -324,61 +348,73 @@ asm-fmts = xd, xj, ui6 data-types = UV4DI, 
UV4DI, UQI /// lasx_xvadd_b +impl = portable name = lasx_xvadd_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvadd_h +impl = portable name = lasx_xvadd_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvadd_w +impl = portable name = lasx_xvadd_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvadd_d +impl = portable name = lasx_xvadd_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvaddi_bu +impl = portable name = lasx_xvaddi_bu asm-fmts = xd, xj, ui5 data-types = V32QI, V32QI, UQI /// lasx_xvaddi_hu +impl = portable name = lasx_xvaddi_hu asm-fmts = xd, xj, ui5 data-types = V16HI, V16HI, UQI /// lasx_xvaddi_wu +impl = portable name = lasx_xvaddi_wu asm-fmts = xd, xj, ui5 data-types = V8SI, V8SI, UQI /// lasx_xvaddi_du +impl = portable name = lasx_xvaddi_du asm-fmts = xd, xj, ui5 data-types = V4DI, V4DI, UQI /// lasx_xvsub_b +impl = portable name = lasx_xvsub_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvsub_h +impl = portable name = lasx_xvsub_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvsub_w +impl = portable name = lasx_xvsub_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvsub_d +impl = portable name = lasx_xvsub_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI @@ -404,361 +440,433 @@ asm-fmts = xd, xj, ui5 data-types = V4DI, V4DI, UQI /// lasx_xvmax_b +impl = portable name = lasx_xvmax_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvmax_h +impl = portable name = lasx_xvmax_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvmax_w +impl = portable name = lasx_xvmax_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvmax_d +impl = portable name = lasx_xvmax_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvmaxi_b +impl = portable name = lasx_xvmaxi_b asm-fmts = xd, xj, si5 data-types = V32QI, V32QI, QI /// lasx_xvmaxi_h +impl = portable name = lasx_xvmaxi_h asm-fmts = 
xd, xj, si5 data-types = V16HI, V16HI, QI /// lasx_xvmaxi_w +impl = portable name = lasx_xvmaxi_w asm-fmts = xd, xj, si5 data-types = V8SI, V8SI, QI /// lasx_xvmaxi_d +impl = portable name = lasx_xvmaxi_d asm-fmts = xd, xj, si5 data-types = V4DI, V4DI, QI /// lasx_xvmax_bu +impl = portable name = lasx_xvmax_bu asm-fmts = xd, xj, xk data-types = UV32QI, UV32QI, UV32QI /// lasx_xvmax_hu +impl = portable name = lasx_xvmax_hu asm-fmts = xd, xj, xk data-types = UV16HI, UV16HI, UV16HI /// lasx_xvmax_wu +impl = portable name = lasx_xvmax_wu asm-fmts = xd, xj, xk data-types = UV8SI, UV8SI, UV8SI /// lasx_xvmax_du +impl = portable name = lasx_xvmax_du asm-fmts = xd, xj, xk data-types = UV4DI, UV4DI, UV4DI /// lasx_xvmaxi_bu +impl = portable name = lasx_xvmaxi_bu asm-fmts = xd, xj, ui5 data-types = UV32QI, UV32QI, UQI /// lasx_xvmaxi_hu +impl = portable name = lasx_xvmaxi_hu asm-fmts = xd, xj, ui5 data-types = UV16HI, UV16HI, UQI /// lasx_xvmaxi_wu +impl = portable name = lasx_xvmaxi_wu asm-fmts = xd, xj, ui5 data-types = UV8SI, UV8SI, UQI /// lasx_xvmaxi_du +impl = portable name = lasx_xvmaxi_du asm-fmts = xd, xj, ui5 data-types = UV4DI, UV4DI, UQI /// lasx_xvmin_b +impl = portable name = lasx_xvmin_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvmin_h +impl = portable name = lasx_xvmin_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvmin_w +impl = portable name = lasx_xvmin_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvmin_d +impl = portable name = lasx_xvmin_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvmini_b +impl = portable name = lasx_xvmini_b asm-fmts = xd, xj, si5 data-types = V32QI, V32QI, QI /// lasx_xvmini_h +impl = portable name = lasx_xvmini_h asm-fmts = xd, xj, si5 data-types = V16HI, V16HI, QI /// lasx_xvmini_w +impl = portable name = lasx_xvmini_w asm-fmts = xd, xj, si5 data-types = V8SI, V8SI, QI /// lasx_xvmini_d +impl = portable name = lasx_xvmini_d asm-fmts = xd, xj, si5 
data-types = V4DI, V4DI, QI /// lasx_xvmin_bu +impl = portable name = lasx_xvmin_bu asm-fmts = xd, xj, xk data-types = UV32QI, UV32QI, UV32QI /// lasx_xvmin_hu +impl = portable name = lasx_xvmin_hu asm-fmts = xd, xj, xk data-types = UV16HI, UV16HI, UV16HI /// lasx_xvmin_wu +impl = portable name = lasx_xvmin_wu asm-fmts = xd, xj, xk data-types = UV8SI, UV8SI, UV8SI /// lasx_xvmin_du +impl = portable name = lasx_xvmin_du asm-fmts = xd, xj, xk data-types = UV4DI, UV4DI, UV4DI /// lasx_xvmini_bu +impl = portable name = lasx_xvmini_bu asm-fmts = xd, xj, ui5 data-types = UV32QI, UV32QI, UQI /// lasx_xvmini_hu +impl = portable name = lasx_xvmini_hu asm-fmts = xd, xj, ui5 data-types = UV16HI, UV16HI, UQI /// lasx_xvmini_wu +impl = portable name = lasx_xvmini_wu asm-fmts = xd, xj, ui5 data-types = UV8SI, UV8SI, UQI /// lasx_xvmini_du +impl = portable name = lasx_xvmini_du asm-fmts = xd, xj, ui5 data-types = UV4DI, UV4DI, UQI /// lasx_xvseq_b +impl = portable name = lasx_xvseq_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvseq_h +impl = portable name = lasx_xvseq_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvseq_w +impl = portable name = lasx_xvseq_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvseq_d +impl = portable name = lasx_xvseq_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvseqi_b +impl = portable name = lasx_xvseqi_b asm-fmts = xd, xj, si5 data-types = V32QI, V32QI, QI /// lasx_xvseqi_h +impl = portable name = lasx_xvseqi_h asm-fmts = xd, xj, si5 data-types = V16HI, V16HI, QI /// lasx_xvseqi_w +impl = portable name = lasx_xvseqi_w asm-fmts = xd, xj, si5 data-types = V8SI, V8SI, QI /// lasx_xvseqi_d +impl = portable name = lasx_xvseqi_d asm-fmts = xd, xj, si5 data-types = V4DI, V4DI, QI /// lasx_xvslt_b +impl = portable name = lasx_xvslt_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvslt_h +impl = portable name = lasx_xvslt_h asm-fmts = xd, xj, xk data-types = V16HI, 
V16HI, V16HI /// lasx_xvslt_w +impl = portable name = lasx_xvslt_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvslt_d +impl = portable name = lasx_xvslt_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvslti_b +impl = portable name = lasx_xvslti_b asm-fmts = xd, xj, si5 data-types = V32QI, V32QI, QI /// lasx_xvslti_h +impl = portable name = lasx_xvslti_h asm-fmts = xd, xj, si5 data-types = V16HI, V16HI, QI /// lasx_xvslti_w +impl = portable name = lasx_xvslti_w asm-fmts = xd, xj, si5 data-types = V8SI, V8SI, QI /// lasx_xvslti_d +impl = portable name = lasx_xvslti_d asm-fmts = xd, xj, si5 data-types = V4DI, V4DI, QI /// lasx_xvslt_bu +impl = portable name = lasx_xvslt_bu asm-fmts = xd, xj, xk data-types = V32QI, UV32QI, UV32QI /// lasx_xvslt_hu +impl = portable name = lasx_xvslt_hu asm-fmts = xd, xj, xk data-types = V16HI, UV16HI, UV16HI /// lasx_xvslt_wu +impl = portable name = lasx_xvslt_wu asm-fmts = xd, xj, xk data-types = V8SI, UV8SI, UV8SI /// lasx_xvslt_du +impl = portable name = lasx_xvslt_du asm-fmts = xd, xj, xk data-types = V4DI, UV4DI, UV4DI /// lasx_xvslti_bu +impl = portable name = lasx_xvslti_bu asm-fmts = xd, xj, ui5 data-types = V32QI, UV32QI, UQI /// lasx_xvslti_hu +impl = portable name = lasx_xvslti_hu asm-fmts = xd, xj, ui5 data-types = V16HI, UV16HI, UQI /// lasx_xvslti_wu +impl = portable name = lasx_xvslti_wu asm-fmts = xd, xj, ui5 data-types = V8SI, UV8SI, UQI /// lasx_xvslti_du +impl = portable name = lasx_xvslti_du asm-fmts = xd, xj, ui5 data-types = V4DI, UV4DI, UQI /// lasx_xvsle_b +impl = portable name = lasx_xvsle_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvsle_h +impl = portable name = lasx_xvsle_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvsle_w +impl = portable name = lasx_xvsle_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvsle_d +impl = portable name = lasx_xvsle_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvslei_b 
+impl = portable name = lasx_xvslei_b asm-fmts = xd, xj, si5 data-types = V32QI, V32QI, QI /// lasx_xvslei_h +impl = portable name = lasx_xvslei_h asm-fmts = xd, xj, si5 data-types = V16HI, V16HI, QI /// lasx_xvslei_w +impl = portable name = lasx_xvslei_w asm-fmts = xd, xj, si5 data-types = V8SI, V8SI, QI /// lasx_xvslei_d +impl = portable name = lasx_xvslei_d asm-fmts = xd, xj, si5 data-types = V4DI, V4DI, QI /// lasx_xvsle_bu +impl = portable name = lasx_xvsle_bu asm-fmts = xd, xj, xk data-types = V32QI, UV32QI, UV32QI /// lasx_xvsle_hu +impl = portable name = lasx_xvsle_hu asm-fmts = xd, xj, xk data-types = V16HI, UV16HI, UV16HI /// lasx_xvsle_wu +impl = portable name = lasx_xvsle_wu asm-fmts = xd, xj, xk data-types = V8SI, UV8SI, UV8SI /// lasx_xvsle_du +impl = portable name = lasx_xvsle_du asm-fmts = xd, xj, xk data-types = V4DI, UV4DI, UV4DI /// lasx_xvslei_bu +impl = portable name = lasx_xvslei_bu asm-fmts = xd, xj, ui5 data-types = V32QI, UV32QI, UQI /// lasx_xvslei_hu +impl = portable name = lasx_xvslei_hu asm-fmts = xd, xj, ui5 data-types = V16HI, UV16HI, UQI /// lasx_xvslei_wu +impl = portable name = lasx_xvslei_wu asm-fmts = xd, xj, ui5 data-types = V8SI, UV8SI, UQI /// lasx_xvslei_du +impl = portable name = lasx_xvslei_du asm-fmts = xd, xj, ui5 data-types = V4DI, UV4DI, UQI @@ -1024,101 +1132,121 @@ asm-fmts = xd, xj, xk data-types = UV4DI, UV4DI, UV4DI /// lasx_xvmul_b +impl = portable name = lasx_xvmul_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvmul_h +impl = portable name = lasx_xvmul_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvmul_w +impl = portable name = lasx_xvmul_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvmul_d +impl = portable name = lasx_xvmul_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvmadd_b +impl = portable name = lasx_xvmadd_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI, V32QI /// lasx_xvmadd_h +impl = portable name = lasx_xvmadd_h 
asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI, V16HI /// lasx_xvmadd_w +impl = portable name = lasx_xvmadd_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI, V8SI /// lasx_xvmadd_d +impl = portable name = lasx_xvmadd_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI, V4DI /// lasx_xvmsub_b +impl = portable name = lasx_xvmsub_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI, V32QI /// lasx_xvmsub_h +impl = portable name = lasx_xvmsub_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI, V16HI /// lasx_xvmsub_w +impl = portable name = lasx_xvmsub_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI, V8SI /// lasx_xvmsub_d +impl = portable name = lasx_xvmsub_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI, V4DI /// lasx_xvdiv_b +impl = portable name = lasx_xvdiv_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvdiv_h +impl = portable name = lasx_xvdiv_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvdiv_w +impl = portable name = lasx_xvdiv_w asm-fmts = xd, xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvdiv_d +impl = portable name = lasx_xvdiv_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvdiv_bu +impl = portable name = lasx_xvdiv_bu asm-fmts = xd, xj, xk data-types = UV32QI, UV32QI, UV32QI /// lasx_xvdiv_hu +impl = portable name = lasx_xvdiv_hu asm-fmts = xd, xj, xk data-types = UV16HI, UV16HI, UV16HI /// lasx_xvdiv_wu +impl = portable name = lasx_xvdiv_wu asm-fmts = xd, xj, xk data-types = UV8SI, UV8SI, UV8SI /// lasx_xvdiv_du +impl = portable name = lasx_xvdiv_du asm-fmts = xd, xj, xk data-types = UV4DI, UV4DI, UV4DI @@ -1184,41 +1312,49 @@ asm-fmts = xd, xj, xk data-types = V4DI, UV8SI, UV8SI /// lasx_xvmod_b +impl = portable name = lasx_xvmod_b asm-fmts = xd, xj, xk data-types = V32QI, V32QI, V32QI /// lasx_xvmod_h +impl = portable name = lasx_xvmod_h asm-fmts = xd, xj, xk data-types = V16HI, V16HI, V16HI /// lasx_xvmod_w +impl = portable name = lasx_xvmod_w asm-fmts = xd, 
xj, xk data-types = V8SI, V8SI, V8SI /// lasx_xvmod_d +impl = portable name = lasx_xvmod_d asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvmod_bu +impl = portable name = lasx_xvmod_bu asm-fmts = xd, xj, xk data-types = UV32QI, UV32QI, UV32QI /// lasx_xvmod_hu +impl = portable name = lasx_xvmod_hu asm-fmts = xd, xj, xk data-types = UV16HI, UV16HI, UV16HI /// lasx_xvmod_wu +impl = portable name = lasx_xvmod_wu asm-fmts = xd, xj, xk data-types = UV8SI, UV8SI, UV8SI /// lasx_xvmod_du +impl = portable name = lasx_xvmod_du asm-fmts = xd, xj, xk data-types = UV4DI, UV4DI, UV4DI @@ -1384,6 +1520,7 @@ asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI, V4DI /// lasx_xvand_v +impl = portable name = lasx_xvand_v asm-fmts = xd, xj, xk data-types = UV32QI, UV32QI, UV32QI @@ -1394,6 +1531,7 @@ asm-fmts = xd, xj, ui8 data-types = UV32QI, UV32QI, UQI /// lasx_xvor_v +impl = portable name = lasx_xvor_v asm-fmts = xd, xj, xk data-types = UV32QI, UV32QI, UV32QI @@ -1404,6 +1542,7 @@ asm-fmts = xd, xj, ui8 data-types = UV32QI, UV32QI, UQI /// lasx_xvnor_v +impl = portable name = lasx_xvnor_v asm-fmts = xd, xj, xk data-types = UV32QI, UV32QI, UV32QI @@ -1414,6 +1553,7 @@ asm-fmts = xd, xj, ui8 data-types = UV32QI, UV32QI, UQI /// lasx_xvxor_v +impl = portable name = lasx_xvxor_v asm-fmts = xd, xj, xk data-types = UV32QI, UV32QI, UV32QI @@ -1449,41 +1589,49 @@ asm-fmts = xd, xj, ui8 data-types = V8SI, V8SI, USI /// lasx_xvreplgr2vr_b +impl = portable name = lasx_xvreplgr2vr_b asm-fmts = xd, rj data-types = V32QI, SI /// lasx_xvreplgr2vr_h +impl = portable name = lasx_xvreplgr2vr_h asm-fmts = xd, rj data-types = V16HI, SI /// lasx_xvreplgr2vr_w +impl = portable name = lasx_xvreplgr2vr_w asm-fmts = xd, rj data-types = V8SI, SI /// lasx_xvreplgr2vr_d +impl = portable name = lasx_xvreplgr2vr_d asm-fmts = xd, rj data-types = V4DI, DI /// lasx_xvpcnt_b +impl = portable name = lasx_xvpcnt_b asm-fmts = xd, xj data-types = V32QI, V32QI /// lasx_xvpcnt_h +impl = portable name = 
lasx_xvpcnt_h asm-fmts = xd, xj data-types = V16HI, V16HI /// lasx_xvpcnt_w +impl = portable name = lasx_xvpcnt_w asm-fmts = xd, xj data-types = V8SI, V8SI /// lasx_xvpcnt_d +impl = portable name = lasx_xvpcnt_d asm-fmts = xd, xj data-types = V4DI, V4DI @@ -1509,61 +1657,73 @@ asm-fmts = xd, xj data-types = V4DI, V4DI /// lasx_xvclz_b +impl = portable name = lasx_xvclz_b asm-fmts = xd, xj data-types = V32QI, V32QI /// lasx_xvclz_h +impl = portable name = lasx_xvclz_h asm-fmts = xd, xj data-types = V16HI, V16HI /// lasx_xvclz_w +impl = portable name = lasx_xvclz_w asm-fmts = xd, xj data-types = V8SI, V8SI /// lasx_xvclz_d +impl = portable name = lasx_xvclz_d asm-fmts = xd, xj data-types = V4DI, V4DI /// lasx_xvfadd_s +impl = portable name = lasx_xvfadd_s asm-fmts = xd, xj, xk data-types = V8SF, V8SF, V8SF /// lasx_xvfadd_d +impl = portable name = lasx_xvfadd_d asm-fmts = xd, xj, xk data-types = V4DF, V4DF, V4DF /// lasx_xvfsub_s +impl = portable name = lasx_xvfsub_s asm-fmts = xd, xj, xk data-types = V8SF, V8SF, V8SF /// lasx_xvfsub_d +impl = portable name = lasx_xvfsub_d asm-fmts = xd, xj, xk data-types = V4DF, V4DF, V4DF /// lasx_xvfmul_s +impl = portable name = lasx_xvfmul_s asm-fmts = xd, xj, xk data-types = V8SF, V8SF, V8SF /// lasx_xvfmul_d +impl = portable name = lasx_xvfmul_d asm-fmts = xd, xj, xk data-types = V4DF, V4DF, V4DF /// lasx_xvfdiv_s +impl = portable name = lasx_xvfdiv_s asm-fmts = xd, xj, xk data-types = V8SF, V8SF, V8SF /// lasx_xvfdiv_d +impl = portable name = lasx_xvfdiv_d asm-fmts = xd, xj, xk data-types = V4DF, V4DF, V4DF @@ -1629,11 +1789,13 @@ asm-fmts = xd, xj data-types = V4DI, V4DF /// lasx_xvfsqrt_s +impl = portable name = lasx_xvfsqrt_s asm-fmts = xd, xj data-types = V8SF, V8SF /// lasx_xvfsqrt_d +impl = portable name = lasx_xvfsqrt_d asm-fmts = xd, xj data-types = V4DF, V4DF @@ -1804,26 +1966,31 @@ asm-fmts = xd, xj, ui8 data-types = V8SI, V8SI, V8SI, USI /// lasx_xvandn_v +impl = portable name = lasx_xvandn_v asm-fmts = xd, xj, xk 
data-types = UV32QI, UV32QI, UV32QI /// lasx_xvneg_b +impl = portable name = lasx_xvneg_b asm-fmts = xd, xj data-types = V32QI, V32QI /// lasx_xvneg_h +impl = portable name = lasx_xvneg_h asm-fmts = xd, xj data-types = V16HI, V16HI /// lasx_xvneg_w +impl = portable name = lasx_xvneg_w asm-fmts = xd, xj data-types = V8SI, V8SI /// lasx_xvneg_d +impl = portable name = lasx_xvneg_d asm-fmts = xd, xj data-types = V4DI, V4DI @@ -2144,41 +2311,49 @@ asm-fmts = xd, xj, xk data-types = V4DI, V4DI, V4DI /// lasx_xvfmadd_s +impl = portable name = lasx_xvfmadd_s asm-fmts = xd, xj, xk, xa data-types = V8SF, V8SF, V8SF, V8SF /// lasx_xvfmadd_d +impl = portable name = lasx_xvfmadd_d asm-fmts = xd, xj, xk, xa data-types = V4DF, V4DF, V4DF, V4DF /// lasx_xvfmsub_s +impl = portable name = lasx_xvfmsub_s asm-fmts = xd, xj, xk, xa data-types = V8SF, V8SF, V8SF, V8SF /// lasx_xvfmsub_d +impl = portable name = lasx_xvfmsub_d asm-fmts = xd, xj, xk, xa data-types = V4DF, V4DF, V4DF, V4DF /// lasx_xvfnmadd_s +impl = portable name = lasx_xvfnmadd_s asm-fmts = xd, xj, xk, xa data-types = V8SF, V8SF, V8SF, V8SF /// lasx_xvfnmadd_d +impl = portable name = lasx_xvfnmadd_d asm-fmts = xd, xj, xk, xa data-types = V4DF, V4DF, V4DF, V4DF /// lasx_xvfnmsub_s +impl = portable name = lasx_xvfnmsub_s asm-fmts = xd, xj, xk, xa data-types = V8SF, V8SF, V8SF, V8SF /// lasx_xvfnmsub_d +impl = portable name = lasx_xvfnmsub_d asm-fmts = xd, xj, xk, xa data-types = V4DF, V4DF, V4DF, V4DF @@ -2424,6 +2599,7 @@ asm-fmts = xd, xj, xk data-types = V8SI, V4DI, V4DI /// lasx_xvorn_v +impl = portable name = lasx_xvorn_v asm-fmts = xd, xj, xk data-types = UV32QI, UV32QI, UV32QI @@ -2449,11 +2625,13 @@ asm-fmts = xd, xj data-types = UV4DI, UV4DI /// lasx_xvinsgr2vr_w +impl = portable name = lasx_xvinsgr2vr_w asm-fmts = xd, rj, ui3 data-types = V8SI, V8SI, SI, UQI /// lasx_xvinsgr2vr_d +impl = portable name = lasx_xvinsgr2vr_d asm-fmts = xd, rj, ui2 data-types = V4DI, V4DI, DI, UQI @@ -2579,21 +2757,25 @@ asm-fmts = 
xd, rj, si9 data-types = V4DI, CVPOINTER, SI /// lasx_xvpickve2gr_w +impl = portable name = lasx_xvpickve2gr_w asm-fmts = rd, xj, ui3 data-types = SI, V8SI, UQI /// lasx_xvpickve2gr_wu +impl = portable name = lasx_xvpickve2gr_wu asm-fmts = rd, xj, ui3 data-types = USI, V8SI, UQI /// lasx_xvpickve2gr_d +impl = portable name = lasx_xvpickve2gr_d asm-fmts = rd, xj, ui2 data-types = DI, V4DI, UQI /// lasx_xvpickve2gr_du +impl = portable name = lasx_xvpickve2gr_du asm-fmts = rd, xj, ui2 data-types = UDI, V4DI, UQI @@ -3684,21 +3866,25 @@ asm-fmts = xd, xj, ui3 data-types = V8SF, V8SF, UQI /// lasx_xvrepli_b +impl = portable name = lasx_xvrepli_b asm-fmts = xd, si10 data-types = V32QI, HI /// lasx_xvrepli_d +impl = portable name = lasx_xvrepli_d asm-fmts = xd, si10 data-types = V4DI, HI /// lasx_xvrepli_h +impl = portable name = lasx_xvrepli_h asm-fmts = xd, si10 data-types = V16HI, HI /// lasx_xvrepli_w +impl = portable name = lasx_xvrepli_w asm-fmts = xd, si10 data-types = V8SI, HI diff --git a/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec b/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec index b5497b6e6207e..8fd267889988e 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec +++ b/library/stdarch/crates/stdarch-gen-loongarch/lsx.spec @@ -4,81 +4,97 @@ // ``` /// lsx_vsll_b +impl = portable name = lsx_vsll_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vsll_h +impl = portable name = lsx_vsll_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vsll_w +impl = portable name = lsx_vsll_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vsll_d +impl = portable name = lsx_vsll_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vslli_b +impl = portable name = lsx_vslli_b asm-fmts = vd, vj, ui3 data-types = V16QI, V16QI, UQI /// lsx_vslli_h +impl = portable name = lsx_vslli_h asm-fmts = vd, vj, ui4 data-types = V8HI, V8HI, UQI /// lsx_vslli_w +impl = portable name = lsx_vslli_w asm-fmts = vd, vj, ui5 
data-types = V4SI, V4SI, UQI /// lsx_vslli_d +impl = portable name = lsx_vslli_d asm-fmts = vd, vj, ui6 data-types = V2DI, V2DI, UQI /// lsx_vsra_b +impl = portable name = lsx_vsra_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vsra_h +impl = portable name = lsx_vsra_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vsra_w +impl = portable name = lsx_vsra_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vsra_d +impl = portable name = lsx_vsra_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vsrai_b +impl = portable name = lsx_vsrai_b asm-fmts = vd, vj, ui3 data-types = V16QI, V16QI, UQI /// lsx_vsrai_h +impl = portable name = lsx_vsrai_h asm-fmts = vd, vj, ui4 data-types = V8HI, V8HI, UQI /// lsx_vsrai_w +impl = portable name = lsx_vsrai_w asm-fmts = vd, vj, ui5 data-types = V4SI, V4SI, UQI /// lsx_vsrai_d +impl = portable name = lsx_vsrai_d asm-fmts = vd, vj, ui6 data-types = V2DI, V2DI, UQI @@ -124,41 +140,49 @@ asm-fmts = vd, vj, ui6 data-types = V2DI, V2DI, UQI /// lsx_vsrl_b +impl = portable name = lsx_vsrl_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vsrl_h +impl = portable name = lsx_vsrl_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vsrl_w +impl = portable name = lsx_vsrl_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vsrl_d +impl = portable name = lsx_vsrl_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vsrli_b +impl = portable name = lsx_vsrli_b asm-fmts = vd, vj, ui3 data-types = V16QI, V16QI, UQI /// lsx_vsrli_h +impl = portable name = lsx_vsrli_h asm-fmts = vd, vj, ui4 data-types = V8HI, V8HI, UQI /// lsx_vsrli_w +impl = portable name = lsx_vsrli_w asm-fmts = vd, vj, ui5 data-types = V4SI, V4SI, UQI /// lsx_vsrli_d +impl = portable name = lsx_vsrli_d asm-fmts = vd, vj, ui6 data-types = V2DI, V2DI, UQI @@ -324,61 +348,73 @@ asm-fmts = vd, vj, ui6 data-types = UV2DI, UV2DI, UQI /// lsx_vadd_b +impl = portable name = lsx_vadd_b asm-fmts 
= vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vadd_h +impl = portable name = lsx_vadd_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vadd_w +impl = portable name = lsx_vadd_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vadd_d +impl = portable name = lsx_vadd_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vaddi_bu +impl = portable name = lsx_vaddi_bu asm-fmts = vd, vj, ui5 data-types = V16QI, V16QI, UQI /// lsx_vaddi_hu +impl = portable name = lsx_vaddi_hu asm-fmts = vd, vj, ui5 data-types = V8HI, V8HI, UQI /// lsx_vaddi_wu +impl = portable name = lsx_vaddi_wu asm-fmts = vd, vj, ui5 data-types = V4SI, V4SI, UQI /// lsx_vaddi_du +impl = portable name = lsx_vaddi_du asm-fmts = vd, vj, ui5 data-types = V2DI, V2DI, UQI /// lsx_vsub_b +impl = portable name = lsx_vsub_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vsub_h +impl = portable name = lsx_vsub_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vsub_w +impl = portable name = lsx_vsub_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vsub_d +impl = portable name = lsx_vsub_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI @@ -404,361 +440,433 @@ asm-fmts = vd, vj, ui5 data-types = V2DI, V2DI, UQI /// lsx_vmax_b +impl = portable name = lsx_vmax_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vmax_h +impl = portable name = lsx_vmax_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vmax_w +impl = portable name = lsx_vmax_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vmax_d +impl = portable name = lsx_vmax_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vmaxi_b +impl = portable name = lsx_vmaxi_b asm-fmts = vd, vj, si5 data-types = V16QI, V16QI, QI /// lsx_vmaxi_h +impl = portable name = lsx_vmaxi_h asm-fmts = vd, vj, si5 data-types = V8HI, V8HI, QI /// lsx_vmaxi_w +impl = portable name = lsx_vmaxi_w asm-fmts = vd, vj, si5 data-types = V4SI, V4SI, QI /// 
lsx_vmaxi_d +impl = portable name = lsx_vmaxi_d asm-fmts = vd, vj, si5 data-types = V2DI, V2DI, QI /// lsx_vmax_bu +impl = portable name = lsx_vmax_bu asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI /// lsx_vmax_hu +impl = portable name = lsx_vmax_hu asm-fmts = vd, vj, vk data-types = UV8HI, UV8HI, UV8HI /// lsx_vmax_wu +impl = portable name = lsx_vmax_wu asm-fmts = vd, vj, vk data-types = UV4SI, UV4SI, UV4SI /// lsx_vmax_du +impl = portable name = lsx_vmax_du asm-fmts = vd, vj, vk data-types = UV2DI, UV2DI, UV2DI /// lsx_vmaxi_bu +impl = portable name = lsx_vmaxi_bu asm-fmts = vd, vj, ui5 data-types = UV16QI, UV16QI, UQI /// lsx_vmaxi_hu +impl = portable name = lsx_vmaxi_hu asm-fmts = vd, vj, ui5 data-types = UV8HI, UV8HI, UQI /// lsx_vmaxi_wu +impl = portable name = lsx_vmaxi_wu asm-fmts = vd, vj, ui5 data-types = UV4SI, UV4SI, UQI /// lsx_vmaxi_du +impl = portable name = lsx_vmaxi_du asm-fmts = vd, vj, ui5 data-types = UV2DI, UV2DI, UQI /// lsx_vmin_b +impl = portable name = lsx_vmin_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vmin_h +impl = portable name = lsx_vmin_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vmin_w +impl = portable name = lsx_vmin_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vmin_d +impl = portable name = lsx_vmin_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vmini_b +impl = portable name = lsx_vmini_b asm-fmts = vd, vj, si5 data-types = V16QI, V16QI, QI /// lsx_vmini_h +impl = portable name = lsx_vmini_h asm-fmts = vd, vj, si5 data-types = V8HI, V8HI, QI /// lsx_vmini_w +impl = portable name = lsx_vmini_w asm-fmts = vd, vj, si5 data-types = V4SI, V4SI, QI /// lsx_vmini_d +impl = portable name = lsx_vmini_d asm-fmts = vd, vj, si5 data-types = V2DI, V2DI, QI /// lsx_vmin_bu +impl = portable name = lsx_vmin_bu asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI /// lsx_vmin_hu +impl = portable name = lsx_vmin_hu asm-fmts = vd, vj, vk data-types = UV8HI, UV8HI, 
UV8HI /// lsx_vmin_wu +impl = portable name = lsx_vmin_wu asm-fmts = vd, vj, vk data-types = UV4SI, UV4SI, UV4SI /// lsx_vmin_du +impl = portable name = lsx_vmin_du asm-fmts = vd, vj, vk data-types = UV2DI, UV2DI, UV2DI /// lsx_vmini_bu +impl = portable name = lsx_vmini_bu asm-fmts = vd, vj, ui5 data-types = UV16QI, UV16QI, UQI /// lsx_vmini_hu +impl = portable name = lsx_vmini_hu asm-fmts = vd, vj, ui5 data-types = UV8HI, UV8HI, UQI /// lsx_vmini_wu +impl = portable name = lsx_vmini_wu asm-fmts = vd, vj, ui5 data-types = UV4SI, UV4SI, UQI /// lsx_vmini_du +impl = portable name = lsx_vmini_du asm-fmts = vd, vj, ui5 data-types = UV2DI, UV2DI, UQI /// lsx_vseq_b +impl = portable name = lsx_vseq_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vseq_h +impl = portable name = lsx_vseq_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vseq_w +impl = portable name = lsx_vseq_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vseq_d +impl = portable name = lsx_vseq_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vseqi_b +impl = portable name = lsx_vseqi_b asm-fmts = vd, vj, si5 data-types = V16QI, V16QI, QI /// lsx_vseqi_h +impl = portable name = lsx_vseqi_h asm-fmts = vd, vj, si5 data-types = V8HI, V8HI, QI /// lsx_vseqi_w +impl = portable name = lsx_vseqi_w asm-fmts = vd, vj, si5 data-types = V4SI, V4SI, QI /// lsx_vseqi_d +impl = portable name = lsx_vseqi_d asm-fmts = vd, vj, si5 data-types = V2DI, V2DI, QI /// lsx_vslti_b +impl = portable name = lsx_vslti_b asm-fmts = vd, vj, si5 data-types = V16QI, V16QI, QI /// lsx_vslt_b +impl = portable name = lsx_vslt_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vslt_h +impl = portable name = lsx_vslt_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vslt_w +impl = portable name = lsx_vslt_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vslt_d +impl = portable name = lsx_vslt_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// 
lsx_vslti_h +impl = portable name = lsx_vslti_h asm-fmts = vd, vj, si5 data-types = V8HI, V8HI, QI /// lsx_vslti_w +impl = portable name = lsx_vslti_w asm-fmts = vd, vj, si5 data-types = V4SI, V4SI, QI /// lsx_vslti_d +impl = portable name = lsx_vslti_d asm-fmts = vd, vj, si5 data-types = V2DI, V2DI, QI /// lsx_vslt_bu +impl = portable name = lsx_vslt_bu asm-fmts = vd, vj, vk data-types = V16QI, UV16QI, UV16QI /// lsx_vslt_hu +impl = portable name = lsx_vslt_hu asm-fmts = vd, vj, vk data-types = V8HI, UV8HI, UV8HI /// lsx_vslt_wu +impl = portable name = lsx_vslt_wu asm-fmts = vd, vj, vk data-types = V4SI, UV4SI, UV4SI /// lsx_vslt_du +impl = portable name = lsx_vslt_du asm-fmts = vd, vj, vk data-types = V2DI, UV2DI, UV2DI /// lsx_vslti_bu +impl = portable name = lsx_vslti_bu asm-fmts = vd, vj, ui5 data-types = V16QI, UV16QI, UQI /// lsx_vslti_hu +impl = portable name = lsx_vslti_hu asm-fmts = vd, vj, ui5 data-types = V8HI, UV8HI, UQI /// lsx_vslti_wu +impl = portable name = lsx_vslti_wu asm-fmts = vd, vj, ui5 data-types = V4SI, UV4SI, UQI /// lsx_vslti_du +impl = portable name = lsx_vslti_du asm-fmts = vd, vj, ui5 data-types = V2DI, UV2DI, UQI /// lsx_vsle_b +impl = portable name = lsx_vsle_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vsle_h +impl = portable name = lsx_vsle_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vsle_w +impl = portable name = lsx_vsle_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vsle_d +impl = portable name = lsx_vsle_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vslei_b +impl = portable name = lsx_vslei_b asm-fmts = vd, vj, si5 data-types = V16QI, V16QI, QI /// lsx_vslei_h +impl = portable name = lsx_vslei_h asm-fmts = vd, vj, si5 data-types = V8HI, V8HI, QI /// lsx_vslei_w +impl = portable name = lsx_vslei_w asm-fmts = vd, vj, si5 data-types = V4SI, V4SI, QI /// lsx_vslei_d +impl = portable name = lsx_vslei_d asm-fmts = vd, vj, si5 data-types = V2DI, V2DI, QI /// 
lsx_vsle_bu +impl = portable name = lsx_vsle_bu asm-fmts = vd, vj, vk data-types = V16QI, UV16QI, UV16QI /// lsx_vsle_hu +impl = portable name = lsx_vsle_hu asm-fmts = vd, vj, vk data-types = V8HI, UV8HI, UV8HI /// lsx_vsle_wu +impl = portable name = lsx_vsle_wu asm-fmts = vd, vj, vk data-types = V4SI, UV4SI, UV4SI /// lsx_vsle_du +impl = portable name = lsx_vsle_du asm-fmts = vd, vj, vk data-types = V2DI, UV2DI, UV2DI /// lsx_vslei_bu +impl = portable name = lsx_vslei_bu asm-fmts = vd, vj, ui5 data-types = V16QI, UV16QI, UQI /// lsx_vslei_hu +impl = portable name = lsx_vslei_hu asm-fmts = vd, vj, ui5 data-types = V8HI, UV8HI, UQI /// lsx_vslei_wu +impl = portable name = lsx_vslei_wu asm-fmts = vd, vj, ui5 data-types = V4SI, UV4SI, UQI /// lsx_vslei_du +impl = portable name = lsx_vslei_du asm-fmts = vd, vj, ui5 data-types = V2DI, UV2DI, UQI @@ -1024,101 +1132,121 @@ asm-fmts = vd, vj, vk data-types = UV2DI, UV2DI, UV2DI /// lsx_vmul_b +impl = portable name = lsx_vmul_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vmul_h +impl = portable name = lsx_vmul_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vmul_w +impl = portable name = lsx_vmul_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vmul_d +impl = portable name = lsx_vmul_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vmadd_b +impl = portable name = lsx_vmadd_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI, V16QI /// lsx_vmadd_h +impl = portable name = lsx_vmadd_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI, V8HI /// lsx_vmadd_w +impl = portable name = lsx_vmadd_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI, V4SI /// lsx_vmadd_d +impl = portable name = lsx_vmadd_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI, V2DI /// lsx_vmsub_b +impl = portable name = lsx_vmsub_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI, V16QI /// lsx_vmsub_h +impl = portable name = lsx_vmsub_h asm-fmts = vd, vj, vk data-types = V8HI, 
V8HI, V8HI, V8HI /// lsx_vmsub_w +impl = portable name = lsx_vmsub_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI, V4SI /// lsx_vmsub_d +impl = portable name = lsx_vmsub_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI, V2DI /// lsx_vdiv_b +impl = portable name = lsx_vdiv_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vdiv_h +impl = portable name = lsx_vdiv_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vdiv_w +impl = portable name = lsx_vdiv_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vdiv_d +impl = portable name = lsx_vdiv_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vdiv_bu +impl = portable name = lsx_vdiv_bu asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI /// lsx_vdiv_hu +impl = portable name = lsx_vdiv_hu asm-fmts = vd, vj, vk data-types = UV8HI, UV8HI, UV8HI /// lsx_vdiv_wu +impl = portable name = lsx_vdiv_wu asm-fmts = vd, vj, vk data-types = UV4SI, UV4SI, UV4SI /// lsx_vdiv_du +impl = portable name = lsx_vdiv_du asm-fmts = vd, vj, vk data-types = UV2DI, UV2DI, UV2DI @@ -1184,41 +1312,49 @@ asm-fmts = vd, vj, vk data-types = V2DI, UV4SI, UV4SI /// lsx_vmod_b +impl = portable name = lsx_vmod_b asm-fmts = vd, vj, vk data-types = V16QI, V16QI, V16QI /// lsx_vmod_h +impl = portable name = lsx_vmod_h asm-fmts = vd, vj, vk data-types = V8HI, V8HI, V8HI /// lsx_vmod_w +impl = portable name = lsx_vmod_w asm-fmts = vd, vj, vk data-types = V4SI, V4SI, V4SI /// lsx_vmod_d +impl = portable name = lsx_vmod_d asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vmod_bu +impl = portable name = lsx_vmod_bu asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI /// lsx_vmod_hu +impl = portable name = lsx_vmod_hu asm-fmts = vd, vj, vk data-types = UV8HI, UV8HI, UV8HI /// lsx_vmod_wu +impl = portable name = lsx_vmod_wu asm-fmts = vd, vj, vk data-types = UV4SI, UV4SI, UV4SI /// lsx_vmod_du +impl = portable name = lsx_vmod_du asm-fmts = vd, vj, vk data-types = UV2DI, UV2DI, UV2DI 
@@ -1399,6 +1535,7 @@ asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI, V2DI /// lsx_vand_v +impl = portable name = lsx_vand_v asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI @@ -1409,6 +1546,7 @@ asm-fmts = vd, vj, ui8 data-types = UV16QI, UV16QI, UQI /// lsx_vor_v +impl = portable name = lsx_vor_v asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI @@ -1419,6 +1557,7 @@ asm-fmts = vd, vj, ui8 data-types = UV16QI, UV16QI, UQI /// lsx_vnor_v +impl = portable name = lsx_vnor_v asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI @@ -1429,6 +1568,7 @@ asm-fmts = vd, vj, ui8 data-types = UV16QI, UV16QI, UQI /// lsx_vxor_v +impl = portable name = lsx_vxor_v asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI @@ -1464,41 +1604,49 @@ asm-fmts = vd, vj, ui8 data-types = V4SI, V4SI, USI /// lsx_vreplgr2vr_b +impl = portable name = lsx_vreplgr2vr_b asm-fmts = vd, rj data-types = V16QI, SI /// lsx_vreplgr2vr_h +impl = portable name = lsx_vreplgr2vr_h asm-fmts = vd, rj data-types = V8HI, SI /// lsx_vreplgr2vr_w +impl = portable name = lsx_vreplgr2vr_w asm-fmts = vd, rj data-types = V4SI, SI /// lsx_vreplgr2vr_d +impl = portable name = lsx_vreplgr2vr_d asm-fmts = vd, rj data-types = V2DI, DI /// lsx_vpcnt_b +impl = portable name = lsx_vpcnt_b asm-fmts = vd, vj data-types = V16QI, V16QI /// lsx_vpcnt_h +impl = portable name = lsx_vpcnt_h asm-fmts = vd, vj data-types = V8HI, V8HI /// lsx_vpcnt_w +impl = portable name = lsx_vpcnt_w asm-fmts = vd, vj data-types = V4SI, V4SI /// lsx_vpcnt_d +impl = portable name = lsx_vpcnt_d asm-fmts = vd, vj data-types = V2DI, V2DI @@ -1524,121 +1672,145 @@ asm-fmts = vd, vj data-types = V2DI, V2DI /// lsx_vclz_b +impl = portable name = lsx_vclz_b asm-fmts = vd, vj data-types = V16QI, V16QI /// lsx_vclz_h +impl = portable name = lsx_vclz_h asm-fmts = vd, vj data-types = V8HI, V8HI /// lsx_vclz_w +impl = portable name = lsx_vclz_w asm-fmts = vd, vj data-types = V4SI, V4SI /// lsx_vclz_d +impl = portable name = 
lsx_vclz_d asm-fmts = vd, vj data-types = V2DI, V2DI /// lsx_vpickve2gr_b +impl = portable name = lsx_vpickve2gr_b asm-fmts = rd, vj, ui4 data-types = SI, V16QI, UQI /// lsx_vpickve2gr_h +impl = portable name = lsx_vpickve2gr_h asm-fmts = rd, vj, ui3 data-types = SI, V8HI, UQI /// lsx_vpickve2gr_w +impl = portable name = lsx_vpickve2gr_w asm-fmts = rd, vj, ui2 data-types = SI, V4SI, UQI /// lsx_vpickve2gr_d +impl = portable name = lsx_vpickve2gr_d asm-fmts = rd, vj, ui1 data-types = DI, V2DI, UQI /// lsx_vpickve2gr_bu +impl = portable name = lsx_vpickve2gr_bu asm-fmts = rd, vj, ui4 data-types = USI, V16QI, UQI /// lsx_vpickve2gr_hu +impl = portable name = lsx_vpickve2gr_hu asm-fmts = rd, vj, ui3 data-types = USI, V8HI, UQI /// lsx_vpickve2gr_wu +impl = portable name = lsx_vpickve2gr_wu asm-fmts = rd, vj, ui2 data-types = USI, V4SI, UQI /// lsx_vpickve2gr_du +impl = portable name = lsx_vpickve2gr_du asm-fmts = rd, vj, ui1 data-types = UDI, V2DI, UQI /// lsx_vinsgr2vr_b +impl = portable name = lsx_vinsgr2vr_b asm-fmts = vd, rj, ui4 data-types = V16QI, V16QI, SI, UQI /// lsx_vinsgr2vr_h +impl = portable name = lsx_vinsgr2vr_h asm-fmts = vd, rj, ui3 data-types = V8HI, V8HI, SI, UQI /// lsx_vinsgr2vr_w +impl = portable name = lsx_vinsgr2vr_w asm-fmts = vd, rj, ui2 data-types = V4SI, V4SI, SI, UQI /// lsx_vinsgr2vr_d +impl = portable name = lsx_vinsgr2vr_d asm-fmts = vd, rj, ui1 data-types = V2DI, V2DI, DI, UQI /// lsx_vfadd_s +impl = portable name = lsx_vfadd_s asm-fmts = vd, vj, vk data-types = V4SF, V4SF, V4SF /// lsx_vfadd_d +impl = portable name = lsx_vfadd_d asm-fmts = vd, vj, vk data-types = V2DF, V2DF, V2DF /// lsx_vfsub_s +impl = portable name = lsx_vfsub_s asm-fmts = vd, vj, vk data-types = V4SF, V4SF, V4SF /// lsx_vfsub_d +impl = portable name = lsx_vfsub_d asm-fmts = vd, vj, vk data-types = V2DF, V2DF, V2DF /// lsx_vfmul_s +impl = portable name = lsx_vfmul_s asm-fmts = vd, vj, vk data-types = V4SF, V4SF, V4SF /// lsx_vfmul_d +impl = portable name = 
lsx_vfmul_d asm-fmts = vd, vj, vk data-types = V2DF, V2DF, V2DF /// lsx_vfdiv_s +impl = portable name = lsx_vfdiv_s asm-fmts = vd, vj, vk data-types = V4SF, V4SF, V4SF /// lsx_vfdiv_d +impl = portable name = lsx_vfdiv_d asm-fmts = vd, vj, vk data-types = V2DF, V2DF, V2DF @@ -1704,11 +1876,13 @@ asm-fmts = vd, vj data-types = V2DI, V2DF /// lsx_vfsqrt_s +impl = portable name = lsx_vfsqrt_s asm-fmts = vd, vj data-types = V4SF, V4SF /// lsx_vfsqrt_d +impl = portable name = lsx_vfsqrt_d asm-fmts = vd, vj data-types = V2DF, V2DF @@ -1854,26 +2028,31 @@ asm-fmts = vd, vj data-types = V2DF, UV2DI /// lsx_vandn_v +impl = portable name = lsx_vandn_v asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI /// lsx_vneg_b +impl = portable name = lsx_vneg_b asm-fmts = vd, vj data-types = V16QI, V16QI /// lsx_vneg_h +impl = portable name = lsx_vneg_h asm-fmts = vd, vj data-types = V8HI, V8HI /// lsx_vneg_w +impl = portable name = lsx_vneg_w asm-fmts = vd, vj data-types = V4SI, V4SI /// lsx_vneg_d +impl = portable name = lsx_vneg_d asm-fmts = vd, vj data-types = V2DI, V2DI @@ -2194,41 +2373,49 @@ asm-fmts = vd, vj, vk data-types = V2DI, V2DI, V2DI /// lsx_vfmadd_s +impl = portable name = lsx_vfmadd_s asm-fmts = vd, vj, vk, va data-types = V4SF, V4SF, V4SF, V4SF /// lsx_vfmadd_d +impl = portable name = lsx_vfmadd_d asm-fmts = vd, vj, vk, va data-types = V2DF, V2DF, V2DF, V2DF /// lsx_vfmsub_s +impl = portable name = lsx_vfmsub_s asm-fmts = vd, vj, vk, va data-types = V4SF, V4SF, V4SF, V4SF /// lsx_vfmsub_d +impl = portable name = lsx_vfmsub_d asm-fmts = vd, vj, vk, va data-types = V2DF, V2DF, V2DF, V2DF /// lsx_vfnmadd_s +impl = portable name = lsx_vfnmadd_s asm-fmts = vd, vj, vk, va data-types = V4SF, V4SF, V4SF, V4SF /// lsx_vfnmadd_d +impl = portable name = lsx_vfnmadd_d asm-fmts = vd, vj, vk, va data-types = V2DF, V2DF, V2DF, V2DF /// lsx_vfnmsub_s +impl = portable name = lsx_vfnmsub_s asm-fmts = vd, vj, vk, va data-types = V4SF, V4SF, V4SF, V4SF /// lsx_vfnmsub_d +impl = 
portable name = lsx_vfnmsub_d asm-fmts = vd, vj, vk, va data-types = V2DF, V2DF, V2DF, V2DF @@ -3284,6 +3471,7 @@ asm-fmts = vd, vj, vk data-types = V4SI, V2DI, V2DI /// lsx_vorn_v +impl = portable name = lsx_vorn_v asm-fmts = vd, vj, vk data-types = UV16QI, UV16QI, UV16QI @@ -3584,21 +3772,25 @@ asm-fmts = vd, vj, vk data-types = V4SI, V4SF, V4SF /// lsx_vrepli_b +impl = portable name = lsx_vrepli_b asm-fmts = vd, si10 data-types = V16QI, HI /// lsx_vrepli_d +impl = portable name = lsx_vrepli_d asm-fmts = vd, si10 data-types = V2DI, HI /// lsx_vrepli_h +impl = portable name = lsx_vrepli_h asm-fmts = vd, si10 data-types = V8HI, HI /// lsx_vrepli_w +impl = portable name = lsx_vrepli_w asm-fmts = vd, si10 data-types = V4SI, HI diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/portable-intrinsics.txt b/library/stdarch/crates/stdarch-gen-loongarch/src/portable-intrinsics.txt index b62f7f5f8f05d..2d5e9817a31c8 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/src/portable-intrinsics.txt +++ b/library/stdarch/crates/stdarch-gen-loongarch/src/portable-intrinsics.txt @@ -1,2 +1,381 @@ # LSX intrinsics +lsx_vsll_b +lsx_vsll_h +lsx_vsll_w +lsx_vsll_d +lsx_vslli_b +lsx_vslli_h +lsx_vslli_w +lsx_vslli_d +lsx_vsra_b +lsx_vsra_h +lsx_vsra_w +lsx_vsra_d +lsx_vsrai_b +lsx_vsrai_h +lsx_vsrai_w +lsx_vsrai_d +lsx_vsrl_b +lsx_vsrl_h +lsx_vsrl_w +lsx_vsrl_d +lsx_vsrli_b +lsx_vsrli_h +lsx_vsrli_w +lsx_vsrli_d +lsx_vadd_b +lsx_vadd_h +lsx_vadd_w +lsx_vadd_d +lsx_vaddi_bu +lsx_vaddi_hu +lsx_vaddi_wu +lsx_vaddi_du +lsx_vsub_b +lsx_vsub_h +lsx_vsub_w +lsx_vsub_d +lsx_vmax_b +lsx_vmax_h +lsx_vmax_w +lsx_vmax_d +lsx_vmaxi_b +lsx_vmaxi_h +lsx_vmaxi_w +lsx_vmaxi_d +lsx_vmax_bu +lsx_vmax_hu +lsx_vmax_wu +lsx_vmax_du +lsx_vmaxi_bu +lsx_vmaxi_hu +lsx_vmaxi_wu +lsx_vmaxi_du +lsx_vmin_b +lsx_vmin_h +lsx_vmin_w +lsx_vmin_d +lsx_vmini_b +lsx_vmini_h +lsx_vmini_w +lsx_vmini_d +lsx_vmin_bu +lsx_vmin_hu +lsx_vmin_wu +lsx_vmin_du +lsx_vmini_bu +lsx_vmini_hu +lsx_vmini_wu +lsx_vmini_du 
+lsx_vseq_b +lsx_vseq_h +lsx_vseq_w +lsx_vseq_d +lsx_vseqi_b +lsx_vseqi_h +lsx_vseqi_w +lsx_vseqi_d +lsx_vslt_b +lsx_vslt_h +lsx_vslt_w +lsx_vslt_d +lsx_vslti_b +lsx_vslti_h +lsx_vslti_w +lsx_vslti_d +lsx_vslt_bu +lsx_vslt_hu +lsx_vslt_wu +lsx_vslt_du +lsx_vslti_bu +lsx_vslti_hu +lsx_vslti_wu +lsx_vslti_du +lsx_vsle_b +lsx_vsle_h +lsx_vsle_w +lsx_vsle_d +lsx_vslei_b +lsx_vslei_h +lsx_vslei_w +lsx_vslei_d +lsx_vsle_bu +lsx_vsle_hu +lsx_vsle_wu +lsx_vsle_du +lsx_vslei_bu +lsx_vslei_hu +lsx_vslei_wu +lsx_vslei_du +lsx_vmul_b +lsx_vmul_h +lsx_vmul_w +lsx_vmul_d +lsx_vdiv_b +lsx_vdiv_h +lsx_vdiv_w +lsx_vdiv_d +lsx_vdiv_bu +lsx_vdiv_hu +lsx_vdiv_wu +lsx_vdiv_du +lsx_vmod_b +lsx_vmod_h +lsx_vmod_w +lsx_vmod_d +lsx_vmod_bu +lsx_vmod_hu +lsx_vmod_wu +lsx_vmod_du +lsx_vmadd_b +lsx_vmadd_h +lsx_vmadd_w +lsx_vmadd_d +lsx_vmsub_b +lsx_vmsub_h +lsx_vmsub_w +lsx_vmsub_d +lsx_vand_v +lsx_vor_v +lsx_vnor_v +lsx_vxor_v +lsx_vpcnt_b +lsx_vpcnt_h +lsx_vpcnt_w +lsx_vpcnt_d +lsx_vclz_b +lsx_vclz_h +lsx_vclz_w +lsx_vclz_d +lsx_vreplgr2vr_b +lsx_vreplgr2vr_h +lsx_vreplgr2vr_w +lsx_vreplgr2vr_d +lsx_vpickve2gr_b +lsx_vpickve2gr_h +lsx_vpickve2gr_w +lsx_vpickve2gr_d +lsx_vpickve2gr_bu +lsx_vpickve2gr_hu +lsx_vpickve2gr_wu +lsx_vpickve2gr_du +lsx_vinsgr2vr_b +lsx_vinsgr2vr_h +lsx_vinsgr2vr_w +lsx_vinsgr2vr_d +lsx_vfadd_s +lsx_vfadd_d +lsx_vfsub_s +lsx_vfsub_d +lsx_vfmul_s +lsx_vfmul_d +lsx_vfdiv_s +lsx_vfdiv_d +lsx_vfsqrt_s +lsx_vfsqrt_d +lsx_vandn_v +lsx_vneg_b +lsx_vneg_h +lsx_vneg_w +lsx_vneg_d +lsx_vfmadd_s +lsx_vfmadd_d +lsx_vfmsub_s +lsx_vfmsub_d +lsx_vfnmadd_s +lsx_vfnmadd_d +lsx_vfnmsub_s +lsx_vfnmsub_d +lsx_vorn_v +lsx_vrepli_b +lsx_vrepli_h +lsx_vrepli_w +lsx_vrepli_d + # LASX intrinsics +lasx_xvsll_b +lasx_xvsll_h +lasx_xvsll_w +lasx_xvsll_d +lasx_xvslli_b +lasx_xvslli_h +lasx_xvslli_w +lasx_xvslli_d +lasx_xvsra_b +lasx_xvsra_h +lasx_xvsra_w +lasx_xvsra_d +lasx_xvsrai_b +lasx_xvsrai_h +lasx_xvsrai_w +lasx_xvsrai_d +lasx_xvsrl_b +lasx_xvsrl_h +lasx_xvsrl_w +lasx_xvsrl_d 
+lasx_xvsrli_b +lasx_xvsrli_h +lasx_xvsrli_w +lasx_xvsrli_d +lasx_xvadd_b +lasx_xvadd_h +lasx_xvadd_w +lasx_xvadd_d +lasx_xvaddi_bu +lasx_xvaddi_hu +lasx_xvaddi_wu +lasx_xvaddi_du +lasx_xvsub_b +lasx_xvsub_h +lasx_xvsub_w +lasx_xvsub_d +lasx_xvmax_b +lasx_xvmax_h +lasx_xvmax_w +lasx_xvmax_d +lasx_xvmaxi_b +lasx_xvmaxi_h +lasx_xvmaxi_w +lasx_xvmaxi_d +lasx_xvmax_bu +lasx_xvmax_hu +lasx_xvmax_wu +lasx_xvmax_du +lasx_xvmaxi_bu +lasx_xvmaxi_hu +lasx_xvmaxi_wu +lasx_xvmaxi_du +lasx_xvmin_b +lasx_xvmin_h +lasx_xvmin_w +lasx_xvmin_d +lasx_xvmini_b +lasx_xvmini_h +lasx_xvmini_w +lasx_xvmini_d +lasx_xvmin_bu +lasx_xvmin_hu +lasx_xvmin_wu +lasx_xvmin_du +lasx_xvmini_bu +lasx_xvmini_hu +lasx_xvmini_wu +lasx_xvmini_du +lasx_xvseq_b +lasx_xvseq_h +lasx_xvseq_w +lasx_xvseq_d +lasx_xvseqi_b +lasx_xvseqi_h +lasx_xvseqi_w +lasx_xvseqi_d +lasx_xvslt_b +lasx_xvslt_h +lasx_xvslt_w +lasx_xvslt_d +lasx_xvslti_b +lasx_xvslti_h +lasx_xvslti_w +lasx_xvslti_d +lasx_xvslt_bu +lasx_xvslt_hu +lasx_xvslt_wu +lasx_xvslt_du +lasx_xvslti_bu +lasx_xvslti_hu +lasx_xvslti_wu +lasx_xvslti_du +lasx_xvsle_b +lasx_xvsle_h +lasx_xvsle_w +lasx_xvsle_d +lasx_xvslei_b +lasx_xvslei_h +lasx_xvslei_w +lasx_xvslei_d +lasx_xvsle_bu +lasx_xvsle_hu +lasx_xvsle_wu +lasx_xvsle_du +lasx_xvslei_bu +lasx_xvslei_hu +lasx_xvslei_wu +lasx_xvslei_du +lasx_xvmul_b +lasx_xvmul_h +lasx_xvmul_w +lasx_xvmul_d +lasx_xvdiv_b +lasx_xvdiv_h +lasx_xvdiv_w +lasx_xvdiv_d +lasx_xvdiv_bu +lasx_xvdiv_hu +lasx_xvdiv_wu +lasx_xvdiv_du +lasx_xvmod_b +lasx_xvmod_h +lasx_xvmod_w +lasx_xvmod_d +lasx_xvmod_bu +lasx_xvmod_hu +lasx_xvmod_wu +lasx_xvmod_du +lasx_xvmadd_b +lasx_xvmadd_h +lasx_xvmadd_w +lasx_xvmadd_d +lasx_xvmsub_b +lasx_xvmsub_h +lasx_xvmsub_w +lasx_xvmsub_d +lasx_xvand_v +lasx_xvor_v +lasx_xvnor_v +lasx_xvxor_v +lasx_xvpcnt_b +lasx_xvpcnt_h +lasx_xvpcnt_w +lasx_xvpcnt_d +lasx_xvclz_b +lasx_xvclz_h +lasx_xvclz_w +lasx_xvclz_d +lasx_xvreplgr2vr_b +lasx_xvreplgr2vr_h +lasx_xvreplgr2vr_w +lasx_xvreplgr2vr_d +lasx_xvpickve2gr_w 
+lasx_xvpickve2gr_d +lasx_xvpickve2gr_wu +lasx_xvpickve2gr_du +lasx_xvinsgr2vr_w +lasx_xvinsgr2vr_d +lasx_xvfadd_s +lasx_xvfadd_d +lasx_xvfsub_s +lasx_xvfsub_d +lasx_xvfmul_s +lasx_xvfmul_d +lasx_xvfdiv_s +lasx_xvfdiv_d +lasx_xvfsqrt_s +lasx_xvfsqrt_d +lasx_xvandn_v +lasx_xvneg_b +lasx_xvneg_h +lasx_xvneg_w +lasx_xvneg_d +lasx_xvfmadd_s +lasx_xvfmadd_d +lasx_xvfmsub_s +lasx_xvfmsub_d +lasx_xvfnmadd_s +lasx_xvfnmadd_d +lasx_xvfnmsub_s +lasx_xvfnmsub_d +lasx_xvorn_v +lasx_xvrepli_b +lasx_xvrepli_h +lasx_xvrepli_w +lasx_xvrepli_d From 93344416add10403cf0685160c5575cf53144654 Mon Sep 17 00:00:00 2001 From: Folkert de Vries Date: Sat, 25 Apr 2026 01:01:21 +0200 Subject: [PATCH 18/30] mark `vstl1_*` functions as unsafe these functions write to a raw pointer, and so are clearly unsafe to use --- .../core_arch/src/aarch64/neon/generated.rs | 56 +++++++++++-------- .../spec/neon/aarch64.spec.yml | 8 ++- .../crates/stdarch-gen-arm/src/intrinsic.rs | 5 ++ 3 files changed, 45 insertions(+), 24 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index c9ce7a69a6578..6958fee50a378 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -25421,107 +25421,119 @@ pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { } #[doc = "Store-Release a single-element structure from one lane of one register."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] #[inline(always)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")] #[cfg(target_has_atomic = 
"64")] -pub fn vstl1_lane_f64(ptr: *mut f64, val: float64x1_t) { +pub unsafe fn vstl1_lane_f64(ptr: *mut f64, val: float64x1_t) { static_assert!(LANE == 0); - unsafe { vstl1_lane_s64::(ptr as *mut i64, transmute(val)) } + vstl1_lane_s64::(ptr as *mut i64, transmute(val)) } #[doc = "Store-Release a single-element structure from one lane of one register."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_f64)"] +#[doc = "## Safety"] +#[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] #[inline(always)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")] #[cfg(target_has_atomic = "64")] -pub fn vstl1q_lane_f64(ptr: *mut f64, val: float64x2_t) { +pub unsafe fn vstl1q_lane_f64(ptr: *mut f64, val: float64x2_t) { static_assert_uimm_bits!(LANE, 1); - unsafe { vstl1q_lane_s64::(ptr as *mut i64, transmute(val)) } + vstl1q_lane_s64::(ptr as *mut i64, transmute(val)) } #[doc = "Store-Release a single-element structure from one lane of one register."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] #[inline(always)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")] #[cfg(target_has_atomic = "64")] -pub fn vstl1_lane_u64(ptr: *mut u64, val: uint64x1_t) { +pub unsafe fn vstl1_lane_u64(ptr: *mut u64, val: uint64x1_t) { static_assert!(LANE == 0); - unsafe { vstl1_lane_s64::(ptr as *mut i64, transmute(val)) } + vstl1_lane_s64::(ptr as *mut i64, transmute(val)) } #[doc = 
"Store-Release a single-element structure from one lane of one register."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_u64)"] +#[doc = "## Safety"] +#[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] #[inline(always)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")] #[cfg(target_has_atomic = "64")] -pub fn vstl1q_lane_u64(ptr: *mut u64, val: uint64x2_t) { +pub unsafe fn vstl1q_lane_u64(ptr: *mut u64, val: uint64x2_t) { static_assert_uimm_bits!(LANE, 1); - unsafe { vstl1q_lane_s64::(ptr as *mut i64, transmute(val)) } + vstl1q_lane_s64::(ptr as *mut i64, transmute(val)) } #[doc = "Store-Release a single-element structure from one lane of one register."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] #[inline(always)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")] #[cfg(target_has_atomic = "64")] -pub fn vstl1_lane_p64(ptr: *mut p64, val: poly64x1_t) { +pub unsafe fn vstl1_lane_p64(ptr: *mut p64, val: poly64x1_t) { static_assert!(LANE == 0); - unsafe { vstl1_lane_s64::(ptr as *mut i64, transmute(val)) } + vstl1_lane_s64::(ptr as *mut i64, transmute(val)) } #[doc = "Store-Release a single-element structure from one lane of one register."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_p64)"] +#[doc = "## Safety"] +#[doc = " * The pointer in `ptr` must satisfy the requirements of 
[`core::ptr::write`]."] #[inline(always)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")] #[cfg(target_has_atomic = "64")] -pub fn vstl1q_lane_p64(ptr: *mut p64, val: poly64x2_t) { +pub unsafe fn vstl1q_lane_p64(ptr: *mut p64, val: poly64x2_t) { static_assert_uimm_bits!(LANE, 1); - unsafe { vstl1q_lane_s64::(ptr as *mut i64, transmute(val)) } + vstl1q_lane_s64::(ptr as *mut i64, transmute(val)) } #[doc = "Store-Release a single-element structure from one lane of one register."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] #[inline(always)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")] #[cfg(target_has_atomic = "64")] -pub fn vstl1_lane_s64(ptr: *mut i64, val: int64x1_t) { +pub unsafe fn vstl1_lane_s64(ptr: *mut i64, val: int64x1_t) { static_assert!(LANE == 0); let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64; - unsafe { - let lane: i64 = simd_extract!(val, LANE as u32); - (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release) - } + let lane: i64 = simd_extract!(val, LANE as u32); + (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release) } #[doc = "Store-Release a single-element structure from one lane of one register."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_s64)"] +#[doc = "## Safety"] +#[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] #[inline(always)] #[target_feature(enable = 
"neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_neon_feat_lrcpc3", issue = "none")] #[cfg(target_has_atomic = "64")] -pub fn vstl1q_lane_s64(ptr: *mut i64, val: int64x2_t) { +pub unsafe fn vstl1q_lane_s64(ptr: *mut i64, val: int64x2_t) { static_assert_uimm_bits!(LANE, 1); let atomic_dst = ptr as *mut crate::sync::atomic::AtomicI64; - unsafe { - let lane: i64 = simd_extract!(val, LANE as u32); - (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release) - } + let lane: i64 = simd_extract!(val, LANE as u32); + (*atomic_dst).store(transmute(lane), crate::sync::atomic::Ordering::Release) } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"] diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index a769d352649c9..04a78f1868e6c 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -4459,7 +4459,9 @@ intrinsics: doc: "Store-Release a single-element structure from one lane of one register." arguments: ["ptr: {type[0]}", "val: {neon_type[1]}"] static_defs: ["const LANE: i32"] - safety: safe + safety: + unsafe: + - pointer_write: ptr attr: - FnCall: [target_feature, ['enable = "neon,rcpc3"']] - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env= "msvc"']]}]]}, {FnCall: [assert_instr, [stl1, 'LANE = 0']]}]] @@ -4488,7 +4490,9 @@ intrinsics: doc: "Store-Release a single-element structure from one lane of one register." 
arguments: ["ptr: {type[0]}", "val: {neon_type[1]}"] static_defs: ["const LANE: i32"] - safety: safe + safety: + unsafe: + - pointer_write: ptr attr: - FnCall: [target_feature, ['enable = "neon,rcpc3"']] - FnCall: [cfg_attr, [{FnCall: [all, [test, {FnCall: [not, ['target_env= "msvc"']]}]]}, {FnCall: [assert_instr, [stl1, 'LANE = 0']]}]] diff --git a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs index 5d38d45ca6900..aadff82928803 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs @@ -806,6 +806,7 @@ pub enum UnsafetyComment { NonTemporal, Neon, NoProvenance(String), + PointerWrite(String), } #[derive(Debug, Clone, Default, Serialize, Deserialize)] @@ -874,6 +875,10 @@ impl fmt::Display for UnsafetyComment { `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane \ before using it." ), + Self::PointerWrite(arg) => write!( + f, + "The pointer in `{arg}` must satisfy the requirements of [`core::ptr::write`]." + ), Self::UnpredictableOnFault => write!( f, "Result lanes corresponding to inactive FFR lanes (either before or as a result \ From 89a665754a61c9131bd30dfa4a998a7087e4290c Mon Sep 17 00:00:00 2001 From: WANG Rui Date: Tue, 28 Apr 2026 23:57:40 +0800 Subject: [PATCH 19/30] loongarch: Refactor portable SIMD helper naming and intrinsic paths Rename `SimdL` to `SimdExt`, `impl_simdl!` to `impl_simd_ext!`, and the helper functions in `loongarch64/simd.rs` from `simdl_*` to `simd_*`. 
Also qualify intrinsic and helper calls explicitly with aliases: - `is::` for `crate::intrinsics::simd` - `cs::` for `crate::core_arch::simd` - `ls::` for local LoongArch SIMD helpers --- .../src/loongarch64/lasx/portable.rs | 378 ++++++++--------- .../core_arch/src/loongarch64/lsx/portable.rs | 390 +++++++++--------- .../crates/core_arch/src/loongarch64/simd.rs | 139 ++++--- 3 files changed, 453 insertions(+), 454 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/portable.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/portable.rs index 0021d7605f9aa..f2a7254d7947b 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lasx/portable.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/portable.rs @@ -1,201 +1,201 @@ //! LoongArch64 LASX intrinsics - intrinsics::simd implementation -use super::super::{simd::*, *}; -use crate::core_arch::simd::*; -use crate::intrinsics::simd::*; +use super::super::{simd as ls, simd::*, *}; +use crate::core_arch::simd::{self as cs, *}; +use crate::intrinsics::simd as is; use crate::mem::transmute; -impl_vv!("lasx", lasx_xvpcnt_b, simd_ctpop, m256i, i8x32); -impl_vv!("lasx", lasx_xvpcnt_h, simd_ctpop, m256i, i16x16); -impl_vv!("lasx", lasx_xvpcnt_w, simd_ctpop, m256i, i32x8); -impl_vv!("lasx", lasx_xvpcnt_d, simd_ctpop, m256i, i64x4); -impl_vv!("lasx", lasx_xvclz_b, simd_ctlz, m256i, i8x32); -impl_vv!("lasx", lasx_xvclz_h, simd_ctlz, m256i, i16x16); -impl_vv!("lasx", lasx_xvclz_w, simd_ctlz, m256i, i32x8); -impl_vv!("lasx", lasx_xvclz_d, simd_ctlz, m256i, i64x4); -impl_vv!("lasx", lasx_xvneg_b, simd_neg, m256i, i8x32); -impl_vv!("lasx", lasx_xvneg_h, simd_neg, m256i, i16x16); -impl_vv!("lasx", lasx_xvneg_w, simd_neg, m256i, i32x8); -impl_vv!("lasx", lasx_xvneg_d, simd_neg, m256i, i64x4); -impl_vv!("lasx", lasx_xvfsqrt_s, simd_fsqrt, m256, f32x8); -impl_vv!("lasx", lasx_xvfsqrt_d, simd_fsqrt, m256d, f64x4); +impl_vv!("lasx", lasx_xvpcnt_b, is::simd_ctpop, m256i, i8x32); 
+impl_vv!("lasx", lasx_xvpcnt_h, is::simd_ctpop, m256i, i16x16); +impl_vv!("lasx", lasx_xvpcnt_w, is::simd_ctpop, m256i, i32x8); +impl_vv!("lasx", lasx_xvpcnt_d, is::simd_ctpop, m256i, i64x4); +impl_vv!("lasx", lasx_xvclz_b, is::simd_ctlz, m256i, i8x32); +impl_vv!("lasx", lasx_xvclz_h, is::simd_ctlz, m256i, i16x16); +impl_vv!("lasx", lasx_xvclz_w, is::simd_ctlz, m256i, i32x8); +impl_vv!("lasx", lasx_xvclz_d, is::simd_ctlz, m256i, i64x4); +impl_vv!("lasx", lasx_xvneg_b, is::simd_neg, m256i, i8x32); +impl_vv!("lasx", lasx_xvneg_h, is::simd_neg, m256i, i16x16); +impl_vv!("lasx", lasx_xvneg_w, is::simd_neg, m256i, i32x8); +impl_vv!("lasx", lasx_xvneg_d, is::simd_neg, m256i, i64x4); +impl_vv!("lasx", lasx_xvfsqrt_s, is::simd_fsqrt, m256, f32x8); +impl_vv!("lasx", lasx_xvfsqrt_d, is::simd_fsqrt, m256d, f64x4); -impl_gv!("lasx", lasx_xvreplgr2vr_b, simdl_splat, m256i, i8x32, i32); -impl_gv!("lasx", lasx_xvreplgr2vr_h, simdl_splat, m256i, i16x16, i32); -impl_gv!("lasx", lasx_xvreplgr2vr_w, simdl_splat, m256i, i32x8, i32); -impl_gv!("lasx", lasx_xvreplgr2vr_d, simdl_splat, m256i, i64x4, i64); +impl_gv!("lasx", lasx_xvreplgr2vr_b, ls::simd_splat, m256i, i8x32, i32); +impl_gv!("lasx", lasx_xvreplgr2vr_h, ls::simd_splat, m256i, i16x16, i32); +impl_gv!("lasx", lasx_xvreplgr2vr_w, ls::simd_splat, m256i, i32x8, i32); +impl_gv!("lasx", lasx_xvreplgr2vr_d, ls::simd_splat, m256i, i64x4, i64); -impl_sv!("lasx", lasx_xvrepli_b, simdl_splat, m256i, i8x32, 10); -impl_sv!("lasx", lasx_xvrepli_h, simdl_splat, m256i, i16x16, 10); -impl_sv!("lasx", lasx_xvrepli_w, simdl_splat, m256i, i32x8, 10); -impl_sv!("lasx", lasx_xvrepli_d, simdl_splat, m256i, i64x4, 10); +impl_sv!("lasx", lasx_xvrepli_b, ls::simd_splat, m256i, i8x32, 10); +impl_sv!("lasx", lasx_xvrepli_h, ls::simd_splat, m256i, i16x16, 10); +impl_sv!("lasx", lasx_xvrepli_w, ls::simd_splat, m256i, i32x8, 10); +impl_sv!("lasx", lasx_xvrepli_d, ls::simd_splat, m256i, i64x4, 10); -impl_vvv!("lasx", lasx_xvadd_b, simd_add, m256i, i8x32); 
-impl_vvv!("lasx", lasx_xvadd_h, simd_add, m256i, i16x16); -impl_vvv!("lasx", lasx_xvadd_w, simd_add, m256i, i32x8); -impl_vvv!("lasx", lasx_xvadd_d, simd_add, m256i, i64x4); -impl_vvv!("lasx", lasx_xvsub_b, simd_sub, m256i, i8x32); -impl_vvv!("lasx", lasx_xvsub_h, simd_sub, m256i, i16x16); -impl_vvv!("lasx", lasx_xvsub_w, simd_sub, m256i, i32x8); -impl_vvv!("lasx", lasx_xvsub_d, simd_sub, m256i, i64x4); -impl_vvv!("lasx", lasx_xvmax_b, simd_imax, m256i, i8x32); -impl_vvv!("lasx", lasx_xvmax_h, simd_imax, m256i, i16x16); -impl_vvv!("lasx", lasx_xvmax_w, simd_imax, m256i, i32x8); -impl_vvv!("lasx", lasx_xvmax_d, simd_imax, m256i, i64x4); -impl_vvv!("lasx", lasx_xvmax_bu, simd_imax, m256i, u8x32); -impl_vvv!("lasx", lasx_xvmax_hu, simd_imax, m256i, u16x16); -impl_vvv!("lasx", lasx_xvmax_wu, simd_imax, m256i, u32x8); -impl_vvv!("lasx", lasx_xvmax_du, simd_imax, m256i, u64x4); -impl_vvv!("lasx", lasx_xvmin_b, simd_imin, m256i, i8x32); -impl_vvv!("lasx", lasx_xvmin_h, simd_imin, m256i, i16x16); -impl_vvv!("lasx", lasx_xvmin_w, simd_imin, m256i, i32x8); -impl_vvv!("lasx", lasx_xvmin_d, simd_imin, m256i, i64x4); -impl_vvv!("lasx", lasx_xvmin_bu, simd_imin, m256i, u8x32); -impl_vvv!("lasx", lasx_xvmin_hu, simd_imin, m256i, u16x16); -impl_vvv!("lasx", lasx_xvmin_wu, simd_imin, m256i, u32x8); -impl_vvv!("lasx", lasx_xvmin_du, simd_imin, m256i, u64x4); -impl_vvv!("lasx", lasx_xvseq_b, simd_eq, m256i, i8x32); -impl_vvv!("lasx", lasx_xvseq_h, simd_eq, m256i, i16x16); -impl_vvv!("lasx", lasx_xvseq_w, simd_eq, m256i, i32x8); -impl_vvv!("lasx", lasx_xvseq_d, simd_eq, m256i, i64x4); -impl_vvv!("lasx", lasx_xvslt_b, simd_lt, m256i, i8x32); -impl_vvv!("lasx", lasx_xvslt_h, simd_lt, m256i, i16x16); -impl_vvv!("lasx", lasx_xvslt_w, simd_lt, m256i, i32x8); -impl_vvv!("lasx", lasx_xvslt_d, simd_lt, m256i, i64x4); -impl_vvv!("lasx", lasx_xvslt_bu, simd_lt, m256i, u8x32); -impl_vvv!("lasx", lasx_xvslt_hu, simd_lt, m256i, u16x16); -impl_vvv!("lasx", lasx_xvslt_wu, simd_lt, m256i, u32x8); 
-impl_vvv!("lasx", lasx_xvslt_du, simd_lt, m256i, u64x4); -impl_vvv!("lasx", lasx_xvsle_b, simd_le, m256i, i8x32); -impl_vvv!("lasx", lasx_xvsle_h, simd_le, m256i, i16x16); -impl_vvv!("lasx", lasx_xvsle_w, simd_le, m256i, i32x8); -impl_vvv!("lasx", lasx_xvsle_d, simd_le, m256i, i64x4); -impl_vvv!("lasx", lasx_xvsle_bu, simd_le, m256i, u8x32); -impl_vvv!("lasx", lasx_xvsle_hu, simd_le, m256i, u16x16); -impl_vvv!("lasx", lasx_xvsle_wu, simd_le, m256i, u32x8); -impl_vvv!("lasx", lasx_xvsle_du, simd_le, m256i, u64x4); -impl_vvv!("lasx", lasx_xvmul_b, simd_mul, m256i, i8x32); -impl_vvv!("lasx", lasx_xvmul_h, simd_mul, m256i, i16x16); -impl_vvv!("lasx", lasx_xvmul_w, simd_mul, m256i, i32x8); -impl_vvv!("lasx", lasx_xvmul_d, simd_mul, m256i, i64x4); -impl_vvv!("lasx", lasx_xvdiv_b, simd_div, m256i, i8x32); -impl_vvv!("lasx", lasx_xvdiv_h, simd_div, m256i, i16x16); -impl_vvv!("lasx", lasx_xvdiv_w, simd_div, m256i, i32x8); -impl_vvv!("lasx", lasx_xvdiv_d, simd_div, m256i, i64x4); -impl_vvv!("lasx", lasx_xvdiv_bu, simd_div, m256i, u8x32); -impl_vvv!("lasx", lasx_xvdiv_hu, simd_div, m256i, u16x16); -impl_vvv!("lasx", lasx_xvdiv_wu, simd_div, m256i, u32x8); -impl_vvv!("lasx", lasx_xvdiv_du, simd_div, m256i, u64x4); -impl_vvv!("lasx", lasx_xvmod_b, simd_rem, m256i, i8x32); -impl_vvv!("lasx", lasx_xvmod_h, simd_rem, m256i, i16x16); -impl_vvv!("lasx", lasx_xvmod_w, simd_rem, m256i, i32x8); -impl_vvv!("lasx", lasx_xvmod_d, simd_rem, m256i, i64x4); -impl_vvv!("lasx", lasx_xvmod_bu, simd_rem, m256i, u8x32); -impl_vvv!("lasx", lasx_xvmod_hu, simd_rem, m256i, u16x16); -impl_vvv!("lasx", lasx_xvmod_wu, simd_rem, m256i, u32x8); -impl_vvv!("lasx", lasx_xvmod_du, simd_rem, m256i, u64x4); -impl_vvv!("lasx", lasx_xvand_v, simd_and, m256i, u8x32); -impl_vvv!("lasx", lasx_xvandn_v, simdl_andn, m256i, u8x32); -impl_vvv!("lasx", lasx_xvor_v, simd_or, m256i, u8x32); -impl_vvv!("lasx", lasx_xvorn_v, simdl_orn, m256i, u8x32); -impl_vvv!("lasx", lasx_xvnor_v, simdl_nor, m256i, u8x32); 
-impl_vvv!("lasx", lasx_xvxor_v, simd_xor, m256i, u8x32); -impl_vvv!("lasx", lasx_xvfadd_s, simd_add, m256, f32x8); -impl_vvv!("lasx", lasx_xvfadd_d, simd_add, m256d, f64x4); -impl_vvv!("lasx", lasx_xvfsub_s, simd_sub, m256, f32x8); -impl_vvv!("lasx", lasx_xvfsub_d, simd_sub, m256d, f64x4); -impl_vvv!("lasx", lasx_xvfmul_s, simd_mul, m256, f32x8); -impl_vvv!("lasx", lasx_xvfmul_d, simd_mul, m256d, f64x4); -impl_vvv!("lasx", lasx_xvfdiv_s, simd_div, m256, f32x8); -impl_vvv!("lasx", lasx_xvfdiv_d, simd_div, m256d, f64x4); -impl_vvv!("lasx", lasx_xvsll_b, simdl_shl, m256i, i8x32); -impl_vvv!("lasx", lasx_xvsll_h, simdl_shl, m256i, i16x16); -impl_vvv!("lasx", lasx_xvsll_w, simdl_shl, m256i, i32x8); -impl_vvv!("lasx", lasx_xvsll_d, simdl_shl, m256i, i64x4); -impl_vvv!("lasx", lasx_xvsra_b, simdl_shr, m256i, i8x32); -impl_vvv!("lasx", lasx_xvsra_h, simdl_shr, m256i, i16x16); -impl_vvv!("lasx", lasx_xvsra_w, simdl_shr, m256i, i32x8); -impl_vvv!("lasx", lasx_xvsra_d, simdl_shr, m256i, i64x4); -impl_vvv!("lasx", lasx_xvsrl_b, simdl_shr, m256i, u8x32); -impl_vvv!("lasx", lasx_xvsrl_h, simdl_shr, m256i, u16x16); -impl_vvv!("lasx", lasx_xvsrl_w, simdl_shr, m256i, u32x8); -impl_vvv!("lasx", lasx_xvsrl_d, simdl_shr, m256i, u64x4); +impl_vvv!("lasx", lasx_xvadd_b, is::simd_add, m256i, i8x32); +impl_vvv!("lasx", lasx_xvadd_h, is::simd_add, m256i, i16x16); +impl_vvv!("lasx", lasx_xvadd_w, is::simd_add, m256i, i32x8); +impl_vvv!("lasx", lasx_xvadd_d, is::simd_add, m256i, i64x4); +impl_vvv!("lasx", lasx_xvsub_b, is::simd_sub, m256i, i8x32); +impl_vvv!("lasx", lasx_xvsub_h, is::simd_sub, m256i, i16x16); +impl_vvv!("lasx", lasx_xvsub_w, is::simd_sub, m256i, i32x8); +impl_vvv!("lasx", lasx_xvsub_d, is::simd_sub, m256i, i64x4); +impl_vvv!("lasx", lasx_xvmax_b, cs::simd_imax, m256i, i8x32); +impl_vvv!("lasx", lasx_xvmax_h, cs::simd_imax, m256i, i16x16); +impl_vvv!("lasx", lasx_xvmax_w, cs::simd_imax, m256i, i32x8); +impl_vvv!("lasx", lasx_xvmax_d, cs::simd_imax, m256i, i64x4); 
+impl_vvv!("lasx", lasx_xvmax_bu, cs::simd_imax, m256i, u8x32); +impl_vvv!("lasx", lasx_xvmax_hu, cs::simd_imax, m256i, u16x16); +impl_vvv!("lasx", lasx_xvmax_wu, cs::simd_imax, m256i, u32x8); +impl_vvv!("lasx", lasx_xvmax_du, cs::simd_imax, m256i, u64x4); +impl_vvv!("lasx", lasx_xvmin_b, cs::simd_imin, m256i, i8x32); +impl_vvv!("lasx", lasx_xvmin_h, cs::simd_imin, m256i, i16x16); +impl_vvv!("lasx", lasx_xvmin_w, cs::simd_imin, m256i, i32x8); +impl_vvv!("lasx", lasx_xvmin_d, cs::simd_imin, m256i, i64x4); +impl_vvv!("lasx", lasx_xvmin_bu, cs::simd_imin, m256i, u8x32); +impl_vvv!("lasx", lasx_xvmin_hu, cs::simd_imin, m256i, u16x16); +impl_vvv!("lasx", lasx_xvmin_wu, cs::simd_imin, m256i, u32x8); +impl_vvv!("lasx", lasx_xvmin_du, cs::simd_imin, m256i, u64x4); +impl_vvv!("lasx", lasx_xvseq_b, is::simd_eq, m256i, i8x32); +impl_vvv!("lasx", lasx_xvseq_h, is::simd_eq, m256i, i16x16); +impl_vvv!("lasx", lasx_xvseq_w, is::simd_eq, m256i, i32x8); +impl_vvv!("lasx", lasx_xvseq_d, is::simd_eq, m256i, i64x4); +impl_vvv!("lasx", lasx_xvslt_b, is::simd_lt, m256i, i8x32); +impl_vvv!("lasx", lasx_xvslt_h, is::simd_lt, m256i, i16x16); +impl_vvv!("lasx", lasx_xvslt_w, is::simd_lt, m256i, i32x8); +impl_vvv!("lasx", lasx_xvslt_d, is::simd_lt, m256i, i64x4); +impl_vvv!("lasx", lasx_xvslt_bu, is::simd_lt, m256i, u8x32); +impl_vvv!("lasx", lasx_xvslt_hu, is::simd_lt, m256i, u16x16); +impl_vvv!("lasx", lasx_xvslt_wu, is::simd_lt, m256i, u32x8); +impl_vvv!("lasx", lasx_xvslt_du, is::simd_lt, m256i, u64x4); +impl_vvv!("lasx", lasx_xvsle_b, is::simd_le, m256i, i8x32); +impl_vvv!("lasx", lasx_xvsle_h, is::simd_le, m256i, i16x16); +impl_vvv!("lasx", lasx_xvsle_w, is::simd_le, m256i, i32x8); +impl_vvv!("lasx", lasx_xvsle_d, is::simd_le, m256i, i64x4); +impl_vvv!("lasx", lasx_xvsle_bu, is::simd_le, m256i, u8x32); +impl_vvv!("lasx", lasx_xvsle_hu, is::simd_le, m256i, u16x16); +impl_vvv!("lasx", lasx_xvsle_wu, is::simd_le, m256i, u32x8); +impl_vvv!("lasx", lasx_xvsle_du, is::simd_le, m256i, u64x4); 
+impl_vvv!("lasx", lasx_xvmul_b, is::simd_mul, m256i, i8x32); +impl_vvv!("lasx", lasx_xvmul_h, is::simd_mul, m256i, i16x16); +impl_vvv!("lasx", lasx_xvmul_w, is::simd_mul, m256i, i32x8); +impl_vvv!("lasx", lasx_xvmul_d, is::simd_mul, m256i, i64x4); +impl_vvv!("lasx", lasx_xvdiv_b, is::simd_div, m256i, i8x32); +impl_vvv!("lasx", lasx_xvdiv_h, is::simd_div, m256i, i16x16); +impl_vvv!("lasx", lasx_xvdiv_w, is::simd_div, m256i, i32x8); +impl_vvv!("lasx", lasx_xvdiv_d, is::simd_div, m256i, i64x4); +impl_vvv!("lasx", lasx_xvdiv_bu, is::simd_div, m256i, u8x32); +impl_vvv!("lasx", lasx_xvdiv_hu, is::simd_div, m256i, u16x16); +impl_vvv!("lasx", lasx_xvdiv_wu, is::simd_div, m256i, u32x8); +impl_vvv!("lasx", lasx_xvdiv_du, is::simd_div, m256i, u64x4); +impl_vvv!("lasx", lasx_xvmod_b, is::simd_rem, m256i, i8x32); +impl_vvv!("lasx", lasx_xvmod_h, is::simd_rem, m256i, i16x16); +impl_vvv!("lasx", lasx_xvmod_w, is::simd_rem, m256i, i32x8); +impl_vvv!("lasx", lasx_xvmod_d, is::simd_rem, m256i, i64x4); +impl_vvv!("lasx", lasx_xvmod_bu, is::simd_rem, m256i, u8x32); +impl_vvv!("lasx", lasx_xvmod_hu, is::simd_rem, m256i, u16x16); +impl_vvv!("lasx", lasx_xvmod_wu, is::simd_rem, m256i, u32x8); +impl_vvv!("lasx", lasx_xvmod_du, is::simd_rem, m256i, u64x4); +impl_vvv!("lasx", lasx_xvand_v, is::simd_and, m256i, u8x32); +impl_vvv!("lasx", lasx_xvandn_v, ls::simd_andn, m256i, u8x32); +impl_vvv!("lasx", lasx_xvor_v, is::simd_or, m256i, u8x32); +impl_vvv!("lasx", lasx_xvorn_v, ls::simd_orn, m256i, u8x32); +impl_vvv!("lasx", lasx_xvnor_v, ls::simd_nor, m256i, u8x32); +impl_vvv!("lasx", lasx_xvxor_v, is::simd_xor, m256i, u8x32); +impl_vvv!("lasx", lasx_xvfadd_s, is::simd_add, m256, f32x8); +impl_vvv!("lasx", lasx_xvfadd_d, is::simd_add, m256d, f64x4); +impl_vvv!("lasx", lasx_xvfsub_s, is::simd_sub, m256, f32x8); +impl_vvv!("lasx", lasx_xvfsub_d, is::simd_sub, m256d, f64x4); +impl_vvv!("lasx", lasx_xvfmul_s, is::simd_mul, m256, f32x8); +impl_vvv!("lasx", lasx_xvfmul_d, is::simd_mul, m256d, f64x4); 
+impl_vvv!("lasx", lasx_xvfdiv_s, is::simd_div, m256, f32x8); +impl_vvv!("lasx", lasx_xvfdiv_d, is::simd_div, m256d, f64x4); +impl_vvv!("lasx", lasx_xvsll_b, ls::simd_shl, m256i, i8x32); +impl_vvv!("lasx", lasx_xvsll_h, ls::simd_shl, m256i, i16x16); +impl_vvv!("lasx", lasx_xvsll_w, ls::simd_shl, m256i, i32x8); +impl_vvv!("lasx", lasx_xvsll_d, ls::simd_shl, m256i, i64x4); +impl_vvv!("lasx", lasx_xvsra_b, ls::simd_shr, m256i, i8x32); +impl_vvv!("lasx", lasx_xvsra_h, ls::simd_shr, m256i, i16x16); +impl_vvv!("lasx", lasx_xvsra_w, ls::simd_shr, m256i, i32x8); +impl_vvv!("lasx", lasx_xvsra_d, ls::simd_shr, m256i, i64x4); +impl_vvv!("lasx", lasx_xvsrl_b, ls::simd_shr, m256i, u8x32); +impl_vvv!("lasx", lasx_xvsrl_h, ls::simd_shr, m256i, u16x16); +impl_vvv!("lasx", lasx_xvsrl_w, ls::simd_shr, m256i, u32x8); +impl_vvv!("lasx", lasx_xvsrl_d, ls::simd_shr, m256i, u64x4); -impl_vuv!("lasx", lasx_xvslli_b, simd_shl, m256i, i8x32); -impl_vuv!("lasx", lasx_xvslli_h, simd_shl, m256i, i16x16); -impl_vuv!("lasx", lasx_xvslli_w, simd_shl, m256i, i32x8); -impl_vuv!("lasx", lasx_xvslli_d, simd_shl, m256i, i64x4); -impl_vuv!("lasx", lasx_xvsrai_b, simd_shr, m256i, i8x32); -impl_vuv!("lasx", lasx_xvsrai_h, simd_shr, m256i, i16x16); -impl_vuv!("lasx", lasx_xvsrai_w, simd_shr, m256i, i32x8); -impl_vuv!("lasx", lasx_xvsrai_d, simd_shr, m256i, i64x4); -impl_vuv!("lasx", lasx_xvsrli_b, simd_shr, m256i, u8x32); -impl_vuv!("lasx", lasx_xvsrli_h, simd_shr, m256i, u16x16); -impl_vuv!("lasx", lasx_xvsrli_w, simd_shr, m256i, u32x8); -impl_vuv!("lasx", lasx_xvsrli_d, simd_shr, m256i, u64x4); -impl_vuv!("lasx", lasx_xvaddi_bu, simd_add, m256i, u8x32, 5); -impl_vuv!("lasx", lasx_xvaddi_hu, simd_add, m256i, u16x16, 5); -impl_vuv!("lasx", lasx_xvaddi_wu, simd_add, m256i, u32x8, 5); -impl_vuv!("lasx", lasx_xvaddi_du, simd_add, m256i, u64x4, 5); -impl_vuv!("lasx", lasx_xvslti_bu, simd_lt, m256i, u8x32, 5); -impl_vuv!("lasx", lasx_xvslti_hu, simd_lt, m256i, u16x16, 5); -impl_vuv!("lasx", lasx_xvslti_wu, 
simd_lt, m256i, u32x8, 5); -impl_vuv!("lasx", lasx_xvslti_du, simd_lt, m256i, u64x4, 5); -impl_vuv!("lasx", lasx_xvslei_bu, simd_le, m256i, u8x32, 5); -impl_vuv!("lasx", lasx_xvslei_hu, simd_le, m256i, u16x16, 5); -impl_vuv!("lasx", lasx_xvslei_wu, simd_le, m256i, u32x8, 5); -impl_vuv!("lasx", lasx_xvslei_du, simd_le, m256i, u64x4, 5); -impl_vuv!("lasx", lasx_xvmaxi_bu, simd_imax, m256i, u8x32, 5); -impl_vuv!("lasx", lasx_xvmaxi_hu, simd_imax, m256i, u16x16, 5); -impl_vuv!("lasx", lasx_xvmaxi_wu, simd_imax, m256i, u32x8, 5); -impl_vuv!("lasx", lasx_xvmaxi_du, simd_imax, m256i, u64x4, 5); -impl_vuv!("lasx", lasx_xvmini_bu, simd_imin, m256i, u8x32, 5); -impl_vuv!("lasx", lasx_xvmini_hu, simd_imin, m256i, u16x16, 5); -impl_vuv!("lasx", lasx_xvmini_wu, simd_imin, m256i, u32x8, 5); -impl_vuv!("lasx", lasx_xvmini_du, simd_imin, m256i, u64x4, 5); +impl_vuv!("lasx", lasx_xvslli_b, is::simd_shl, m256i, i8x32); +impl_vuv!("lasx", lasx_xvslli_h, is::simd_shl, m256i, i16x16); +impl_vuv!("lasx", lasx_xvslli_w, is::simd_shl, m256i, i32x8); +impl_vuv!("lasx", lasx_xvslli_d, is::simd_shl, m256i, i64x4); +impl_vuv!("lasx", lasx_xvsrai_b, is::simd_shr, m256i, i8x32); +impl_vuv!("lasx", lasx_xvsrai_h, is::simd_shr, m256i, i16x16); +impl_vuv!("lasx", lasx_xvsrai_w, is::simd_shr, m256i, i32x8); +impl_vuv!("lasx", lasx_xvsrai_d, is::simd_shr, m256i, i64x4); +impl_vuv!("lasx", lasx_xvsrli_b, is::simd_shr, m256i, u8x32); +impl_vuv!("lasx", lasx_xvsrli_h, is::simd_shr, m256i, u16x16); +impl_vuv!("lasx", lasx_xvsrli_w, is::simd_shr, m256i, u32x8); +impl_vuv!("lasx", lasx_xvsrli_d, is::simd_shr, m256i, u64x4); +impl_vuv!("lasx", lasx_xvaddi_bu, is::simd_add, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvaddi_hu, is::simd_add, m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvaddi_wu, is::simd_add, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvaddi_du, is::simd_add, m256i, u64x4, 5); +impl_vuv!("lasx", lasx_xvslti_bu, is::simd_lt, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvslti_hu, is::simd_lt, 
m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvslti_wu, is::simd_lt, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvslti_du, is::simd_lt, m256i, u64x4, 5); +impl_vuv!("lasx", lasx_xvslei_bu, is::simd_le, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvslei_hu, is::simd_le, m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvslei_wu, is::simd_le, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvslei_du, is::simd_le, m256i, u64x4, 5); +impl_vuv!("lasx", lasx_xvmaxi_bu, cs::simd_imax, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvmaxi_hu, cs::simd_imax, m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvmaxi_wu, cs::simd_imax, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvmaxi_du, cs::simd_imax, m256i, u64x4, 5); +impl_vuv!("lasx", lasx_xvmini_bu, cs::simd_imin, m256i, u8x32, 5); +impl_vuv!("lasx", lasx_xvmini_hu, cs::simd_imin, m256i, u16x16, 5); +impl_vuv!("lasx", lasx_xvmini_wu, cs::simd_imin, m256i, u32x8, 5); +impl_vuv!("lasx", lasx_xvmini_du, cs::simd_imin, m256i, u64x4, 5); -impl_vug!("lasx", lasx_xvpickve2gr_w, simd_extract, m256i, i32x8, i32, 3); -impl_vug!("lasx", lasx_xvpickve2gr_d, simd_extract, m256i, i64x4, i64, 2); -impl_vug!("lasx", lasx_xvpickve2gr_wu, simd_extract, m256i, u32x8, u32, 3); -impl_vug!("lasx", lasx_xvpickve2gr_du, simd_extract, m256i, u64x4, u64, 2); +impl_vug!("lasx", lasx_xvpickve2gr_w, is::simd_extract, m256i, i32x8, i32, 3); +impl_vug!("lasx", lasx_xvpickve2gr_d, is::simd_extract, m256i, i64x4, i64, 2); +impl_vug!("lasx", lasx_xvpickve2gr_wu, is::simd_extract, m256i, u32x8, u32, 3); +impl_vug!("lasx", lasx_xvpickve2gr_du, is::simd_extract, m256i, u64x4, u64, 2); -impl_vsv!("lasx", lasx_xvseqi_b, simd_eq, m256i, i8x32, 5); -impl_vsv!("lasx", lasx_xvseqi_h, simd_eq, m256i, i16x16, 5); -impl_vsv!("lasx", lasx_xvseqi_w, simd_eq, m256i, i32x8, 5); -impl_vsv!("lasx", lasx_xvseqi_d, simd_eq, m256i, i64x4, 5); -impl_vsv!("lasx", lasx_xvslti_b, simd_lt, m256i, i8x32, 5); -impl_vsv!("lasx", lasx_xvslti_h, simd_lt, m256i, i16x16, 5); -impl_vsv!("lasx", lasx_xvslti_w, 
simd_lt, m256i, i32x8, 5); -impl_vsv!("lasx", lasx_xvslti_d, simd_lt, m256i, i64x4, 5); -impl_vsv!("lasx", lasx_xvslei_b, simd_le, m256i, i8x32, 5); -impl_vsv!("lasx", lasx_xvslei_h, simd_le, m256i, i16x16, 5); -impl_vsv!("lasx", lasx_xvslei_w, simd_le, m256i, i32x8, 5); -impl_vsv!("lasx", lasx_xvslei_d, simd_le, m256i, i64x4, 5); -impl_vsv!("lasx", lasx_xvmaxi_b, simd_imax, m256i, i8x32, 5); -impl_vsv!("lasx", lasx_xvmaxi_h, simd_imax, m256i, i16x16, 5); -impl_vsv!("lasx", lasx_xvmaxi_w, simd_imax, m256i, i32x8, 5); -impl_vsv!("lasx", lasx_xvmaxi_d, simd_imax, m256i, i64x4, 5); -impl_vsv!("lasx", lasx_xvmini_b, simd_imin, m256i, i8x32, 5); -impl_vsv!("lasx", lasx_xvmini_h, simd_imin, m256i, i16x16, 5); -impl_vsv!("lasx", lasx_xvmini_w, simd_imin, m256i, i32x8, 5); -impl_vsv!("lasx", lasx_xvmini_d, simd_imin, m256i, i64x4, 5); +impl_vsv!("lasx", lasx_xvseqi_b, is::simd_eq, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvseqi_h, is::simd_eq, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvseqi_w, is::simd_eq, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvseqi_d, is::simd_eq, m256i, i64x4, 5); +impl_vsv!("lasx", lasx_xvslti_b, is::simd_lt, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvslti_h, is::simd_lt, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvslti_w, is::simd_lt, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvslti_d, is::simd_lt, m256i, i64x4, 5); +impl_vsv!("lasx", lasx_xvslei_b, is::simd_le, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvslei_h, is::simd_le, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvslei_w, is::simd_le, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvslei_d, is::simd_le, m256i, i64x4, 5); +impl_vsv!("lasx", lasx_xvmaxi_b, cs::simd_imax, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvmaxi_h, cs::simd_imax, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvmaxi_w, cs::simd_imax, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvmaxi_d, cs::simd_imax, m256i, i64x4, 5); +impl_vsv!("lasx", lasx_xvmini_b, cs::simd_imin, m256i, i8x32, 5); +impl_vsv!("lasx", lasx_xvmini_h, 
cs::simd_imin, m256i, i16x16, 5); +impl_vsv!("lasx", lasx_xvmini_w, cs::simd_imin, m256i, i32x8, 5); +impl_vsv!("lasx", lasx_xvmini_d, cs::simd_imin, m256i, i64x4, 5); -impl_vvvv!("lasx", lasx_xvmadd_b, simdl_madd, m256i, i8x32); -impl_vvvv!("lasx", lasx_xvmadd_h, simdl_madd, m256i, i16x16); -impl_vvvv!("lasx", lasx_xvmadd_w, simdl_madd, m256i, i32x8); -impl_vvvv!("lasx", lasx_xvmadd_d, simdl_madd, m256i, i64x4); -impl_vvvv!("lasx", lasx_xvmsub_b, simdl_msub, m256i, i8x32); -impl_vvvv!("lasx", lasx_xvmsub_h, simdl_msub, m256i, i16x16); -impl_vvvv!("lasx", lasx_xvmsub_w, simdl_msub, m256i, i32x8); -impl_vvvv!("lasx", lasx_xvmsub_d, simdl_msub, m256i, i64x4); -impl_vvvv!("lasx", lasx_xvfmadd_s, simd_fma, m256, f32x8); -impl_vvvv!("lasx", lasx_xvfmadd_d, simd_fma, m256d, f64x4); -impl_vvvv!("lasx", lasx_xvfmsub_s, simdl_fms, m256, f32x8); -impl_vvvv!("lasx", lasx_xvfmsub_d, simdl_fms, m256d, f64x4); -impl_vvvv!("lasx", lasx_xvfnmadd_s, simdl_nfma, m256, f32x8); -impl_vvvv!("lasx", lasx_xvfnmadd_d, simdl_nfma, m256d, f64x4); -impl_vvvv!("lasx", lasx_xvfnmsub_s, simdl_nfms, m256, f32x8); -impl_vvvv!("lasx", lasx_xvfnmsub_d, simdl_nfms, m256d, f64x4); +impl_vvvv!("lasx", lasx_xvmadd_b, ls::simd_madd, m256i, i8x32); +impl_vvvv!("lasx", lasx_xvmadd_h, ls::simd_madd, m256i, i16x16); +impl_vvvv!("lasx", lasx_xvmadd_w, ls::simd_madd, m256i, i32x8); +impl_vvvv!("lasx", lasx_xvmadd_d, ls::simd_madd, m256i, i64x4); +impl_vvvv!("lasx", lasx_xvmsub_b, ls::simd_msub, m256i, i8x32); +impl_vvvv!("lasx", lasx_xvmsub_h, ls::simd_msub, m256i, i16x16); +impl_vvvv!("lasx", lasx_xvmsub_w, ls::simd_msub, m256i, i32x8); +impl_vvvv!("lasx", lasx_xvmsub_d, ls::simd_msub, m256i, i64x4); +impl_vvvv!("lasx", lasx_xvfmadd_s, is::simd_fma, m256, f32x8); +impl_vvvv!("lasx", lasx_xvfmadd_d, is::simd_fma, m256d, f64x4); +impl_vvvv!("lasx", lasx_xvfmsub_s, ls::simd_fmsub, m256, f32x8); +impl_vvvv!("lasx", lasx_xvfmsub_d, ls::simd_fmsub, m256d, f64x4); +impl_vvvv!("lasx", lasx_xvfnmadd_s, 
ls::simd_fnmadd, m256, f32x8); +impl_vvvv!("lasx", lasx_xvfnmadd_d, ls::simd_fnmadd, m256d, f64x4); +impl_vvvv!("lasx", lasx_xvfnmsub_s, ls::simd_fnmsub, m256, f32x8); +impl_vvvv!("lasx", lasx_xvfnmsub_d, ls::simd_fnmsub, m256d, f64x4); -impl_vugv!("lasx", lasx_xvinsgr2vr_w, simd_insert, m256i, i32x8, i32, 3); -impl_vugv!("lasx", lasx_xvinsgr2vr_d, simd_insert, m256i, i64x4, i64, 2); +impl_vugv!("lasx", lasx_xvinsgr2vr_w, is::simd_insert, m256i, i32x8, i32, 3); +impl_vugv!("lasx", lasx_xvinsgr2vr_d, is::simd_insert, m256i, i64x4, i64, 2); diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/portable.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/portable.rs index e33b1758f3111..1980000c3d4dd 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/portable.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/portable.rs @@ -1,207 +1,207 @@ //! LoongArch64 LSX intrinsics - intrinsics::simd implementation -use super::super::{simd::*, *}; -use crate::core_arch::simd::*; -use crate::intrinsics::simd::*; +use super::super::{simd as ls, simd::*, *}; +use crate::core_arch::simd::{self as cs, *}; +use crate::intrinsics::simd as is; use crate::mem::transmute; -impl_vv!("lsx", lsx_vpcnt_b, simd_ctpop, m128i, i8x16); -impl_vv!("lsx", lsx_vpcnt_h, simd_ctpop, m128i, i16x8); -impl_vv!("lsx", lsx_vpcnt_w, simd_ctpop, m128i, i32x4); -impl_vv!("lsx", lsx_vpcnt_d, simd_ctpop, m128i, i64x2); -impl_vv!("lsx", lsx_vclz_b, simd_ctlz, m128i, i8x16); -impl_vv!("lsx", lsx_vclz_h, simd_ctlz, m128i, i16x8); -impl_vv!("lsx", lsx_vclz_w, simd_ctlz, m128i, i32x4); -impl_vv!("lsx", lsx_vclz_d, simd_ctlz, m128i, i64x2); -impl_vv!("lsx", lsx_vneg_b, simd_neg, m128i, i8x16); -impl_vv!("lsx", lsx_vneg_h, simd_neg, m128i, i16x8); -impl_vv!("lsx", lsx_vneg_w, simd_neg, m128i, i32x4); -impl_vv!("lsx", lsx_vneg_d, simd_neg, m128i, i64x2); -impl_vv!("lsx", lsx_vfsqrt_s, simd_fsqrt, m128, f32x4); -impl_vv!("lsx", lsx_vfsqrt_d, simd_fsqrt, m128d, f64x2); 
+impl_vv!("lsx", lsx_vpcnt_b, is::simd_ctpop, m128i, i8x16); +impl_vv!("lsx", lsx_vpcnt_h, is::simd_ctpop, m128i, i16x8); +impl_vv!("lsx", lsx_vpcnt_w, is::simd_ctpop, m128i, i32x4); +impl_vv!("lsx", lsx_vpcnt_d, is::simd_ctpop, m128i, i64x2); +impl_vv!("lsx", lsx_vclz_b, is::simd_ctlz, m128i, i8x16); +impl_vv!("lsx", lsx_vclz_h, is::simd_ctlz, m128i, i16x8); +impl_vv!("lsx", lsx_vclz_w, is::simd_ctlz, m128i, i32x4); +impl_vv!("lsx", lsx_vclz_d, is::simd_ctlz, m128i, i64x2); +impl_vv!("lsx", lsx_vneg_b, is::simd_neg, m128i, i8x16); +impl_vv!("lsx", lsx_vneg_h, is::simd_neg, m128i, i16x8); +impl_vv!("lsx", lsx_vneg_w, is::simd_neg, m128i, i32x4); +impl_vv!("lsx", lsx_vneg_d, is::simd_neg, m128i, i64x2); +impl_vv!("lsx", lsx_vfsqrt_s, is::simd_fsqrt, m128, f32x4); +impl_vv!("lsx", lsx_vfsqrt_d, is::simd_fsqrt, m128d, f64x2); -impl_gv!("lsx", lsx_vreplgr2vr_b, simdl_splat, m128i, i8x16, i32); -impl_gv!("lsx", lsx_vreplgr2vr_h, simdl_splat, m128i, i16x8, i32); -impl_gv!("lsx", lsx_vreplgr2vr_w, simdl_splat, m128i, i32x4, i32); -impl_gv!("lsx", lsx_vreplgr2vr_d, simdl_splat, m128i, i64x2, i64); +impl_gv!("lsx", lsx_vreplgr2vr_b, ls::simd_splat, m128i, i8x16, i32); +impl_gv!("lsx", lsx_vreplgr2vr_h, ls::simd_splat, m128i, i16x8, i32); +impl_gv!("lsx", lsx_vreplgr2vr_w, ls::simd_splat, m128i, i32x4, i32); +impl_gv!("lsx", lsx_vreplgr2vr_d, ls::simd_splat, m128i, i64x2, i64); -impl_sv!("lsx", lsx_vrepli_b, simdl_splat, m128i, i8x16, 10); -impl_sv!("lsx", lsx_vrepli_h, simdl_splat, m128i, i16x8, 10); -impl_sv!("lsx", lsx_vrepli_w, simdl_splat, m128i, i32x4, 10); -impl_sv!("lsx", lsx_vrepli_d, simdl_splat, m128i, i64x2, 10); +impl_sv!("lsx", lsx_vrepli_b, ls::simd_splat, m128i, i8x16, 10); +impl_sv!("lsx", lsx_vrepli_h, ls::simd_splat, m128i, i16x8, 10); +impl_sv!("lsx", lsx_vrepli_w, ls::simd_splat, m128i, i32x4, 10); +impl_sv!("lsx", lsx_vrepli_d, ls::simd_splat, m128i, i64x2, 10); -impl_vvv!("lsx", lsx_vadd_b, simd_add, m128i, i8x16); -impl_vvv!("lsx", lsx_vadd_h, 
simd_add, m128i, i16x8); -impl_vvv!("lsx", lsx_vadd_w, simd_add, m128i, i32x4); -impl_vvv!("lsx", lsx_vadd_d, simd_add, m128i, i64x2); -impl_vvv!("lsx", lsx_vsub_b, simd_sub, m128i, i8x16); -impl_vvv!("lsx", lsx_vsub_h, simd_sub, m128i, i16x8); -impl_vvv!("lsx", lsx_vsub_w, simd_sub, m128i, i32x4); -impl_vvv!("lsx", lsx_vsub_d, simd_sub, m128i, i64x2); -impl_vvv!("lsx", lsx_vmax_b, simd_imax, m128i, i8x16); -impl_vvv!("lsx", lsx_vmax_h, simd_imax, m128i, i16x8); -impl_vvv!("lsx", lsx_vmax_w, simd_imax, m128i, i32x4); -impl_vvv!("lsx", lsx_vmax_d, simd_imax, m128i, i64x2); -impl_vvv!("lsx", lsx_vmax_bu, simd_imax, m128i, u8x16); -impl_vvv!("lsx", lsx_vmax_hu, simd_imax, m128i, u16x8); -impl_vvv!("lsx", lsx_vmax_wu, simd_imax, m128i, u32x4); -impl_vvv!("lsx", lsx_vmax_du, simd_imax, m128i, u64x2); -impl_vvv!("lsx", lsx_vmin_b, simd_imin, m128i, i8x16); -impl_vvv!("lsx", lsx_vmin_h, simd_imin, m128i, i16x8); -impl_vvv!("lsx", lsx_vmin_w, simd_imin, m128i, i32x4); -impl_vvv!("lsx", lsx_vmin_d, simd_imin, m128i, i64x2); -impl_vvv!("lsx", lsx_vmin_bu, simd_imin, m128i, u8x16); -impl_vvv!("lsx", lsx_vmin_hu, simd_imin, m128i, u16x8); -impl_vvv!("lsx", lsx_vmin_wu, simd_imin, m128i, u32x4); -impl_vvv!("lsx", lsx_vmin_du, simd_imin, m128i, u64x2); -impl_vvv!("lsx", lsx_vseq_b, simd_eq, m128i, i8x16); -impl_vvv!("lsx", lsx_vseq_h, simd_eq, m128i, i16x8); -impl_vvv!("lsx", lsx_vseq_w, simd_eq, m128i, i32x4); -impl_vvv!("lsx", lsx_vseq_d, simd_eq, m128i, i64x2); -impl_vvv!("lsx", lsx_vslt_b, simd_lt, m128i, i8x16); -impl_vvv!("lsx", lsx_vslt_h, simd_lt, m128i, i16x8); -impl_vvv!("lsx", lsx_vslt_w, simd_lt, m128i, i32x4); -impl_vvv!("lsx", lsx_vslt_d, simd_lt, m128i, i64x2); -impl_vvv!("lsx", lsx_vslt_bu, simd_lt, m128i, u8x16); -impl_vvv!("lsx", lsx_vslt_hu, simd_lt, m128i, u16x8); -impl_vvv!("lsx", lsx_vslt_wu, simd_lt, m128i, u32x4); -impl_vvv!("lsx", lsx_vslt_du, simd_lt, m128i, u64x2); -impl_vvv!("lsx", lsx_vsle_b, simd_le, m128i, i8x16); -impl_vvv!("lsx", lsx_vsle_h, 
simd_le, m128i, i16x8); -impl_vvv!("lsx", lsx_vsle_w, simd_le, m128i, i32x4); -impl_vvv!("lsx", lsx_vsle_d, simd_le, m128i, i64x2); -impl_vvv!("lsx", lsx_vsle_bu, simd_le, m128i, u8x16); -impl_vvv!("lsx", lsx_vsle_hu, simd_le, m128i, u16x8); -impl_vvv!("lsx", lsx_vsle_wu, simd_le, m128i, u32x4); -impl_vvv!("lsx", lsx_vsle_du, simd_le, m128i, u64x2); -impl_vvv!("lsx", lsx_vmul_b, simd_mul, m128i, i8x16); -impl_vvv!("lsx", lsx_vmul_h, simd_mul, m128i, i16x8); -impl_vvv!("lsx", lsx_vmul_w, simd_mul, m128i, i32x4); -impl_vvv!("lsx", lsx_vmul_d, simd_mul, m128i, i64x2); -impl_vvv!("lsx", lsx_vdiv_b, simd_div, m128i, i8x16); -impl_vvv!("lsx", lsx_vdiv_h, simd_div, m128i, i16x8); -impl_vvv!("lsx", lsx_vdiv_w, simd_div, m128i, i32x4); -impl_vvv!("lsx", lsx_vdiv_d, simd_div, m128i, i64x2); -impl_vvv!("lsx", lsx_vdiv_bu, simd_div, m128i, u8x16); -impl_vvv!("lsx", lsx_vdiv_hu, simd_div, m128i, u16x8); -impl_vvv!("lsx", lsx_vdiv_wu, simd_div, m128i, u32x4); -impl_vvv!("lsx", lsx_vdiv_du, simd_div, m128i, u64x2); -impl_vvv!("lsx", lsx_vmod_b, simd_rem, m128i, i8x16); -impl_vvv!("lsx", lsx_vmod_h, simd_rem, m128i, i16x8); -impl_vvv!("lsx", lsx_vmod_w, simd_rem, m128i, i32x4); -impl_vvv!("lsx", lsx_vmod_d, simd_rem, m128i, i64x2); -impl_vvv!("lsx", lsx_vmod_bu, simd_rem, m128i, u8x16); -impl_vvv!("lsx", lsx_vmod_hu, simd_rem, m128i, u16x8); -impl_vvv!("lsx", lsx_vmod_wu, simd_rem, m128i, u32x4); -impl_vvv!("lsx", lsx_vmod_du, simd_rem, m128i, u64x2); -impl_vvv!("lsx", lsx_vand_v, simd_and, m128i, u8x16); -impl_vvv!("lsx", lsx_vandn_v, simdl_andn, m128i, u8x16); -impl_vvv!("lsx", lsx_vor_v, simd_or, m128i, u8x16); -impl_vvv!("lsx", lsx_vorn_v, simdl_orn, m128i, u8x16); -impl_vvv!("lsx", lsx_vnor_v, simdl_nor, m128i, u8x16); -impl_vvv!("lsx", lsx_vxor_v, simd_xor, m128i, u8x16); -impl_vvv!("lsx", lsx_vfadd_s, simd_add, m128, f32x4); -impl_vvv!("lsx", lsx_vfadd_d, simd_add, m128d, f64x2); -impl_vvv!("lsx", lsx_vfsub_s, simd_sub, m128, f32x4); -impl_vvv!("lsx", lsx_vfsub_d, simd_sub, 
m128d, f64x2); -impl_vvv!("lsx", lsx_vfmul_s, simd_mul, m128, f32x4); -impl_vvv!("lsx", lsx_vfmul_d, simd_mul, m128d, f64x2); -impl_vvv!("lsx", lsx_vfdiv_s, simd_div, m128, f32x4); -impl_vvv!("lsx", lsx_vfdiv_d, simd_div, m128d, f64x2); -impl_vvv!("lsx", lsx_vsll_b, simdl_shl, m128i, i8x16); -impl_vvv!("lsx", lsx_vsll_h, simdl_shl, m128i, i16x8); -impl_vvv!("lsx", lsx_vsll_w, simdl_shl, m128i, i32x4); -impl_vvv!("lsx", lsx_vsll_d, simdl_shl, m128i, i64x2); -impl_vvv!("lsx", lsx_vsra_b, simdl_shr, m128i, i8x16); -impl_vvv!("lsx", lsx_vsra_h, simdl_shr, m128i, i16x8); -impl_vvv!("lsx", lsx_vsra_w, simdl_shr, m128i, i32x4); -impl_vvv!("lsx", lsx_vsra_d, simdl_shr, m128i, i64x2); -impl_vvv!("lsx", lsx_vsrl_b, simdl_shr, m128i, u8x16); -impl_vvv!("lsx", lsx_vsrl_h, simdl_shr, m128i, u16x8); -impl_vvv!("lsx", lsx_vsrl_w, simdl_shr, m128i, u32x4); -impl_vvv!("lsx", lsx_vsrl_d, simdl_shr, m128i, u64x2); +impl_vvv!("lsx", lsx_vadd_b, is::simd_add, m128i, i8x16); +impl_vvv!("lsx", lsx_vadd_h, is::simd_add, m128i, i16x8); +impl_vvv!("lsx", lsx_vadd_w, is::simd_add, m128i, i32x4); +impl_vvv!("lsx", lsx_vadd_d, is::simd_add, m128i, i64x2); +impl_vvv!("lsx", lsx_vsub_b, is::simd_sub, m128i, i8x16); +impl_vvv!("lsx", lsx_vsub_h, is::simd_sub, m128i, i16x8); +impl_vvv!("lsx", lsx_vsub_w, is::simd_sub, m128i, i32x4); +impl_vvv!("lsx", lsx_vsub_d, is::simd_sub, m128i, i64x2); +impl_vvv!("lsx", lsx_vmax_b, cs::simd_imax, m128i, i8x16); +impl_vvv!("lsx", lsx_vmax_h, cs::simd_imax, m128i, i16x8); +impl_vvv!("lsx", lsx_vmax_w, cs::simd_imax, m128i, i32x4); +impl_vvv!("lsx", lsx_vmax_d, cs::simd_imax, m128i, i64x2); +impl_vvv!("lsx", lsx_vmax_bu, cs::simd_imax, m128i, u8x16); +impl_vvv!("lsx", lsx_vmax_hu, cs::simd_imax, m128i, u16x8); +impl_vvv!("lsx", lsx_vmax_wu, cs::simd_imax, m128i, u32x4); +impl_vvv!("lsx", lsx_vmax_du, cs::simd_imax, m128i, u64x2); +impl_vvv!("lsx", lsx_vmin_b, cs::simd_imin, m128i, i8x16); +impl_vvv!("lsx", lsx_vmin_h, cs::simd_imin, m128i, i16x8); 
+impl_vvv!("lsx", lsx_vmin_w, cs::simd_imin, m128i, i32x4); +impl_vvv!("lsx", lsx_vmin_d, cs::simd_imin, m128i, i64x2); +impl_vvv!("lsx", lsx_vmin_bu, cs::simd_imin, m128i, u8x16); +impl_vvv!("lsx", lsx_vmin_hu, cs::simd_imin, m128i, u16x8); +impl_vvv!("lsx", lsx_vmin_wu, cs::simd_imin, m128i, u32x4); +impl_vvv!("lsx", lsx_vmin_du, cs::simd_imin, m128i, u64x2); +impl_vvv!("lsx", lsx_vseq_b, is::simd_eq, m128i, i8x16); +impl_vvv!("lsx", lsx_vseq_h, is::simd_eq, m128i, i16x8); +impl_vvv!("lsx", lsx_vseq_w, is::simd_eq, m128i, i32x4); +impl_vvv!("lsx", lsx_vseq_d, is::simd_eq, m128i, i64x2); +impl_vvv!("lsx", lsx_vslt_b, is::simd_lt, m128i, i8x16); +impl_vvv!("lsx", lsx_vslt_h, is::simd_lt, m128i, i16x8); +impl_vvv!("lsx", lsx_vslt_w, is::simd_lt, m128i, i32x4); +impl_vvv!("lsx", lsx_vslt_d, is::simd_lt, m128i, i64x2); +impl_vvv!("lsx", lsx_vslt_bu, is::simd_lt, m128i, u8x16); +impl_vvv!("lsx", lsx_vslt_hu, is::simd_lt, m128i, u16x8); +impl_vvv!("lsx", lsx_vslt_wu, is::simd_lt, m128i, u32x4); +impl_vvv!("lsx", lsx_vslt_du, is::simd_lt, m128i, u64x2); +impl_vvv!("lsx", lsx_vsle_b, is::simd_le, m128i, i8x16); +impl_vvv!("lsx", lsx_vsle_h, is::simd_le, m128i, i16x8); +impl_vvv!("lsx", lsx_vsle_w, is::simd_le, m128i, i32x4); +impl_vvv!("lsx", lsx_vsle_d, is::simd_le, m128i, i64x2); +impl_vvv!("lsx", lsx_vsle_bu, is::simd_le, m128i, u8x16); +impl_vvv!("lsx", lsx_vsle_hu, is::simd_le, m128i, u16x8); +impl_vvv!("lsx", lsx_vsle_wu, is::simd_le, m128i, u32x4); +impl_vvv!("lsx", lsx_vsle_du, is::simd_le, m128i, u64x2); +impl_vvv!("lsx", lsx_vmul_b, is::simd_mul, m128i, i8x16); +impl_vvv!("lsx", lsx_vmul_h, is::simd_mul, m128i, i16x8); +impl_vvv!("lsx", lsx_vmul_w, is::simd_mul, m128i, i32x4); +impl_vvv!("lsx", lsx_vmul_d, is::simd_mul, m128i, i64x2); +impl_vvv!("lsx", lsx_vdiv_b, is::simd_div, m128i, i8x16); +impl_vvv!("lsx", lsx_vdiv_h, is::simd_div, m128i, i16x8); +impl_vvv!("lsx", lsx_vdiv_w, is::simd_div, m128i, i32x4); +impl_vvv!("lsx", lsx_vdiv_d, is::simd_div, m128i, 
i64x2); +impl_vvv!("lsx", lsx_vdiv_bu, is::simd_div, m128i, u8x16); +impl_vvv!("lsx", lsx_vdiv_hu, is::simd_div, m128i, u16x8); +impl_vvv!("lsx", lsx_vdiv_wu, is::simd_div, m128i, u32x4); +impl_vvv!("lsx", lsx_vdiv_du, is::simd_div, m128i, u64x2); +impl_vvv!("lsx", lsx_vmod_b, is::simd_rem, m128i, i8x16); +impl_vvv!("lsx", lsx_vmod_h, is::simd_rem, m128i, i16x8); +impl_vvv!("lsx", lsx_vmod_w, is::simd_rem, m128i, i32x4); +impl_vvv!("lsx", lsx_vmod_d, is::simd_rem, m128i, i64x2); +impl_vvv!("lsx", lsx_vmod_bu, is::simd_rem, m128i, u8x16); +impl_vvv!("lsx", lsx_vmod_hu, is::simd_rem, m128i, u16x8); +impl_vvv!("lsx", lsx_vmod_wu, is::simd_rem, m128i, u32x4); +impl_vvv!("lsx", lsx_vmod_du, is::simd_rem, m128i, u64x2); +impl_vvv!("lsx", lsx_vand_v, is::simd_and, m128i, u8x16); +impl_vvv!("lsx", lsx_vandn_v, ls::simd_andn, m128i, u8x16); +impl_vvv!("lsx", lsx_vor_v, is::simd_or, m128i, u8x16); +impl_vvv!("lsx", lsx_vorn_v, ls::simd_orn, m128i, u8x16); +impl_vvv!("lsx", lsx_vnor_v, ls::simd_nor, m128i, u8x16); +impl_vvv!("lsx", lsx_vxor_v, is::simd_xor, m128i, u8x16); +impl_vvv!("lsx", lsx_vfadd_s, is::simd_add, m128, f32x4); +impl_vvv!("lsx", lsx_vfadd_d, is::simd_add, m128d, f64x2); +impl_vvv!("lsx", lsx_vfsub_s, is::simd_sub, m128, f32x4); +impl_vvv!("lsx", lsx_vfsub_d, is::simd_sub, m128d, f64x2); +impl_vvv!("lsx", lsx_vfmul_s, is::simd_mul, m128, f32x4); +impl_vvv!("lsx", lsx_vfmul_d, is::simd_mul, m128d, f64x2); +impl_vvv!("lsx", lsx_vfdiv_s, is::simd_div, m128, f32x4); +impl_vvv!("lsx", lsx_vfdiv_d, is::simd_div, m128d, f64x2); +impl_vvv!("lsx", lsx_vsll_b, ls::simd_shl, m128i, i8x16); +impl_vvv!("lsx", lsx_vsll_h, ls::simd_shl, m128i, i16x8); +impl_vvv!("lsx", lsx_vsll_w, ls::simd_shl, m128i, i32x4); +impl_vvv!("lsx", lsx_vsll_d, ls::simd_shl, m128i, i64x2); +impl_vvv!("lsx", lsx_vsra_b, ls::simd_shr, m128i, i8x16); +impl_vvv!("lsx", lsx_vsra_h, ls::simd_shr, m128i, i16x8); +impl_vvv!("lsx", lsx_vsra_w, ls::simd_shr, m128i, i32x4); +impl_vvv!("lsx", lsx_vsra_d, 
ls::simd_shr, m128i, i64x2); +impl_vvv!("lsx", lsx_vsrl_b, ls::simd_shr, m128i, u8x16); +impl_vvv!("lsx", lsx_vsrl_h, ls::simd_shr, m128i, u16x8); +impl_vvv!("lsx", lsx_vsrl_w, ls::simd_shr, m128i, u32x4); +impl_vvv!("lsx", lsx_vsrl_d, ls::simd_shr, m128i, u64x2); -impl_vuv!("lsx", lsx_vslli_b, simd_shl, m128i, i8x16); -impl_vuv!("lsx", lsx_vslli_h, simd_shl, m128i, i16x8); -impl_vuv!("lsx", lsx_vslli_w, simd_shl, m128i, i32x4); -impl_vuv!("lsx", lsx_vslli_d, simd_shl, m128i, i64x2); -impl_vuv!("lsx", lsx_vsrai_b, simd_shr, m128i, i8x16); -impl_vuv!("lsx", lsx_vsrai_h, simd_shr, m128i, i16x8); -impl_vuv!("lsx", lsx_vsrai_w, simd_shr, m128i, i32x4); -impl_vuv!("lsx", lsx_vsrai_d, simd_shr, m128i, i64x2); -impl_vuv!("lsx", lsx_vsrli_b, simd_shr, m128i, u8x16); -impl_vuv!("lsx", lsx_vsrli_h, simd_shr, m128i, u16x8); -impl_vuv!("lsx", lsx_vsrli_w, simd_shr, m128i, u32x4); -impl_vuv!("lsx", lsx_vsrli_d, simd_shr, m128i, u64x2); -impl_vuv!("lsx", lsx_vaddi_bu, simd_add, m128i, u8x16, 5); -impl_vuv!("lsx", lsx_vaddi_hu, simd_add, m128i, u16x8, 5); -impl_vuv!("lsx", lsx_vaddi_wu, simd_add, m128i, u32x4, 5); -impl_vuv!("lsx", lsx_vaddi_du, simd_add, m128i, u64x2, 5); -impl_vuv!("lsx", lsx_vslti_bu, simd_lt, m128i, u8x16, 5); -impl_vuv!("lsx", lsx_vslti_hu, simd_lt, m128i, u16x8, 5); -impl_vuv!("lsx", lsx_vslti_wu, simd_lt, m128i, u32x4, 5); -impl_vuv!("lsx", lsx_vslti_du, simd_lt, m128i, u64x2, 5); -impl_vuv!("lsx", lsx_vslei_bu, simd_le, m128i, u8x16, 5); -impl_vuv!("lsx", lsx_vslei_hu, simd_le, m128i, u16x8, 5); -impl_vuv!("lsx", lsx_vslei_wu, simd_le, m128i, u32x4, 5); -impl_vuv!("lsx", lsx_vslei_du, simd_le, m128i, u64x2, 5); -impl_vuv!("lsx", lsx_vmaxi_bu, simd_imax, m128i, u8x16, 5); -impl_vuv!("lsx", lsx_vmaxi_hu, simd_imax, m128i, u16x8, 5); -impl_vuv!("lsx", lsx_vmaxi_wu, simd_imax, m128i, u32x4, 5); -impl_vuv!("lsx", lsx_vmaxi_du, simd_imax, m128i, u64x2, 5); -impl_vuv!("lsx", lsx_vmini_bu, simd_imin, m128i, u8x16, 5); -impl_vuv!("lsx", lsx_vmini_hu, simd_imin, 
m128i, u16x8, 5); -impl_vuv!("lsx", lsx_vmini_wu, simd_imin, m128i, u32x4, 5); -impl_vuv!("lsx", lsx_vmini_du, simd_imin, m128i, u64x2, 5); +impl_vuv!("lsx", lsx_vslli_b, is::simd_shl, m128i, i8x16); +impl_vuv!("lsx", lsx_vslli_h, is::simd_shl, m128i, i16x8); +impl_vuv!("lsx", lsx_vslli_w, is::simd_shl, m128i, i32x4); +impl_vuv!("lsx", lsx_vslli_d, is::simd_shl, m128i, i64x2); +impl_vuv!("lsx", lsx_vsrai_b, is::simd_shr, m128i, i8x16); +impl_vuv!("lsx", lsx_vsrai_h, is::simd_shr, m128i, i16x8); +impl_vuv!("lsx", lsx_vsrai_w, is::simd_shr, m128i, i32x4); +impl_vuv!("lsx", lsx_vsrai_d, is::simd_shr, m128i, i64x2); +impl_vuv!("lsx", lsx_vsrli_b, is::simd_shr, m128i, u8x16); +impl_vuv!("lsx", lsx_vsrli_h, is::simd_shr, m128i, u16x8); +impl_vuv!("lsx", lsx_vsrli_w, is::simd_shr, m128i, u32x4); +impl_vuv!("lsx", lsx_vsrli_d, is::simd_shr, m128i, u64x2); +impl_vuv!("lsx", lsx_vaddi_bu, is::simd_add, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vaddi_hu, is::simd_add, m128i, u16x8, 5); +impl_vuv!("lsx", lsx_vaddi_wu, is::simd_add, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vaddi_du, is::simd_add, m128i, u64x2, 5); +impl_vuv!("lsx", lsx_vslti_bu, is::simd_lt, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vslti_hu, is::simd_lt, m128i, u16x8, 5); +impl_vuv!("lsx", lsx_vslti_wu, is::simd_lt, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vslti_du, is::simd_lt, m128i, u64x2, 5); +impl_vuv!("lsx", lsx_vslei_bu, is::simd_le, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vslei_hu, is::simd_le, m128i, u16x8, 5); +impl_vuv!("lsx", lsx_vslei_wu, is::simd_le, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vslei_du, is::simd_le, m128i, u64x2, 5); +impl_vuv!("lsx", lsx_vmaxi_bu, cs::simd_imax, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vmaxi_hu, cs::simd_imax, m128i, u16x8, 5); +impl_vuv!("lsx", lsx_vmaxi_wu, cs::simd_imax, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vmaxi_du, cs::simd_imax, m128i, u64x2, 5); +impl_vuv!("lsx", lsx_vmini_bu, cs::simd_imin, m128i, u8x16, 5); +impl_vuv!("lsx", lsx_vmini_hu, cs::simd_imin, m128i, 
u16x8, 5); +impl_vuv!("lsx", lsx_vmini_wu, cs::simd_imin, m128i, u32x4, 5); +impl_vuv!("lsx", lsx_vmini_du, cs::simd_imin, m128i, u64x2, 5); -impl_vug!("lsx", lsx_vpickve2gr_b, simd_extract, m128i, i8x16, i32, 4); -impl_vug!("lsx", lsx_vpickve2gr_h, simd_extract, m128i, i16x8, i32, 3); -impl_vug!("lsx", lsx_vpickve2gr_w, simd_extract, m128i, i32x4, i32, 2); -impl_vug!("lsx", lsx_vpickve2gr_d, simd_extract, m128i, i64x2, i64, 1); -impl_vug!("lsx", lsx_vpickve2gr_bu, simd_extract, m128i, u8x16, u32, 4); -impl_vug!("lsx", lsx_vpickve2gr_hu, simd_extract, m128i, u16x8, u32, 3); -impl_vug!("lsx", lsx_vpickve2gr_wu, simd_extract, m128i, u32x4, u32, 2); -impl_vug!("lsx", lsx_vpickve2gr_du, simd_extract, m128i, u64x2, u64, 1); +impl_vug!("lsx", lsx_vpickve2gr_b, is::simd_extract, m128i, i8x16, i32, 4); +impl_vug!("lsx", lsx_vpickve2gr_h, is::simd_extract, m128i, i16x8, i32, 3); +impl_vug!("lsx", lsx_vpickve2gr_w, is::simd_extract, m128i, i32x4, i32, 2); +impl_vug!("lsx", lsx_vpickve2gr_d, is::simd_extract, m128i, i64x2, i64, 1); +impl_vug!("lsx", lsx_vpickve2gr_bu, is::simd_extract, m128i, u8x16, u32, 4); +impl_vug!("lsx", lsx_vpickve2gr_hu, is::simd_extract, m128i, u16x8, u32, 3); +impl_vug!("lsx", lsx_vpickve2gr_wu, is::simd_extract, m128i, u32x4, u32, 2); +impl_vug!("lsx", lsx_vpickve2gr_du, is::simd_extract, m128i, u64x2, u64, 1); -impl_vsv!("lsx", lsx_vseqi_b, simd_eq, m128i, i8x16, 5); -impl_vsv!("lsx", lsx_vseqi_h, simd_eq, m128i, i16x8, 5); -impl_vsv!("lsx", lsx_vseqi_w, simd_eq, m128i, i32x4, 5); -impl_vsv!("lsx", lsx_vseqi_d, simd_eq, m128i, i64x2, 5); -impl_vsv!("lsx", lsx_vslti_b, simd_lt, m128i, i8x16, 5); -impl_vsv!("lsx", lsx_vslti_h, simd_lt, m128i, i16x8, 5); -impl_vsv!("lsx", lsx_vslti_w, simd_lt, m128i, i32x4, 5); -impl_vsv!("lsx", lsx_vslti_d, simd_lt, m128i, i64x2, 5); -impl_vsv!("lsx", lsx_vslei_b, simd_le, m128i, i8x16, 5); -impl_vsv!("lsx", lsx_vslei_h, simd_le, m128i, i16x8, 5); -impl_vsv!("lsx", lsx_vslei_w, simd_le, m128i, i32x4, 5); 
-impl_vsv!("lsx", lsx_vslei_d, simd_le, m128i, i64x2, 5); -impl_vsv!("lsx", lsx_vmaxi_b, simd_imax, m128i, i8x16, 5); -impl_vsv!("lsx", lsx_vmaxi_h, simd_imax, m128i, i16x8, 5); -impl_vsv!("lsx", lsx_vmaxi_w, simd_imax, m128i, i32x4, 5); -impl_vsv!("lsx", lsx_vmaxi_d, simd_imax, m128i, i64x2, 5); -impl_vsv!("lsx", lsx_vmini_b, simd_imin, m128i, i8x16, 5); -impl_vsv!("lsx", lsx_vmini_h, simd_imin, m128i, i16x8, 5); -impl_vsv!("lsx", lsx_vmini_w, simd_imin, m128i, i32x4, 5); -impl_vsv!("lsx", lsx_vmini_d, simd_imin, m128i, i64x2, 5); +impl_vsv!("lsx", lsx_vseqi_b, is::simd_eq, m128i, i8x16, 5); +impl_vsv!("lsx", lsx_vseqi_h, is::simd_eq, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vseqi_w, is::simd_eq, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vseqi_d, is::simd_eq, m128i, i64x2, 5); +impl_vsv!("lsx", lsx_vslti_b, is::simd_lt, m128i, i8x16, 5); +impl_vsv!("lsx", lsx_vslti_h, is::simd_lt, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vslti_w, is::simd_lt, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vslti_d, is::simd_lt, m128i, i64x2, 5); +impl_vsv!("lsx", lsx_vslei_b, is::simd_le, m128i, i8x16, 5); +impl_vsv!("lsx", lsx_vslei_h, is::simd_le, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vslei_w, is::simd_le, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vslei_d, is::simd_le, m128i, i64x2, 5); +impl_vsv!("lsx", lsx_vmaxi_b, cs::simd_imax, m128i, i8x16, 5); +impl_vsv!("lsx", lsx_vmaxi_h, cs::simd_imax, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vmaxi_w, cs::simd_imax, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vmaxi_d, cs::simd_imax, m128i, i64x2, 5); +impl_vsv!("lsx", lsx_vmini_b, cs::simd_imin, m128i, i8x16, 5); +impl_vsv!("lsx", lsx_vmini_h, cs::simd_imin, m128i, i16x8, 5); +impl_vsv!("lsx", lsx_vmini_w, cs::simd_imin, m128i, i32x4, 5); +impl_vsv!("lsx", lsx_vmini_d, cs::simd_imin, m128i, i64x2, 5); -impl_vvvv!("lsx", lsx_vmadd_b, simdl_madd, m128i, i8x16); -impl_vvvv!("lsx", lsx_vmadd_h, simdl_madd, m128i, i16x8); -impl_vvvv!("lsx", lsx_vmadd_w, simdl_madd, m128i, i32x4); -impl_vvvv!("lsx", 
lsx_vmadd_d, simdl_madd, m128i, i64x2); -impl_vvvv!("lsx", lsx_vmsub_b, simdl_msub, m128i, i8x16); -impl_vvvv!("lsx", lsx_vmsub_h, simdl_msub, m128i, i16x8); -impl_vvvv!("lsx", lsx_vmsub_w, simdl_msub, m128i, i32x4); -impl_vvvv!("lsx", lsx_vmsub_d, simdl_msub, m128i, i64x2); -impl_vvvv!("lsx", lsx_vfmadd_s, simd_fma, m128, f32x4); -impl_vvvv!("lsx", lsx_vfmadd_d, simd_fma, m128d, f64x2); -impl_vvvv!("lsx", lsx_vfmsub_s, simdl_fms, m128, f32x4); -impl_vvvv!("lsx", lsx_vfmsub_d, simdl_fms, m128d, f64x2); -impl_vvvv!("lsx", lsx_vfnmadd_s, simdl_nfma, m128, f32x4); -impl_vvvv!("lsx", lsx_vfnmadd_d, simdl_nfma, m128d, f64x2); -impl_vvvv!("lsx", lsx_vfnmsub_s, simdl_nfms, m128, f32x4); -impl_vvvv!("lsx", lsx_vfnmsub_d, simdl_nfms, m128d, f64x2); +impl_vvvv!("lsx", lsx_vmadd_b, ls::simd_madd, m128i, i8x16); +impl_vvvv!("lsx", lsx_vmadd_h, ls::simd_madd, m128i, i16x8); +impl_vvvv!("lsx", lsx_vmadd_w, ls::simd_madd, m128i, i32x4); +impl_vvvv!("lsx", lsx_vmadd_d, ls::simd_madd, m128i, i64x2); +impl_vvvv!("lsx", lsx_vmsub_b, ls::simd_msub, m128i, i8x16); +impl_vvvv!("lsx", lsx_vmsub_h, ls::simd_msub, m128i, i16x8); +impl_vvvv!("lsx", lsx_vmsub_w, ls::simd_msub, m128i, i32x4); +impl_vvvv!("lsx", lsx_vmsub_d, ls::simd_msub, m128i, i64x2); +impl_vvvv!("lsx", lsx_vfmadd_s, is::simd_fma, m128, f32x4); +impl_vvvv!("lsx", lsx_vfmadd_d, is::simd_fma, m128d, f64x2); +impl_vvvv!("lsx", lsx_vfmsub_s, ls::simd_fmsub, m128, f32x4); +impl_vvvv!("lsx", lsx_vfmsub_d, ls::simd_fmsub, m128d, f64x2); +impl_vvvv!("lsx", lsx_vfnmadd_s, ls::simd_fnmadd, m128, f32x4); +impl_vvvv!("lsx", lsx_vfnmadd_d, ls::simd_fnmadd, m128d, f64x2); +impl_vvvv!("lsx", lsx_vfnmsub_s, ls::simd_fnmsub, m128, f32x4); +impl_vvvv!("lsx", lsx_vfnmsub_d, ls::simd_fnmsub, m128d, f64x2); -impl_vugv!("lsx", lsx_vinsgr2vr_b, simd_insert, m128i, i8x16, i32, 4); -impl_vugv!("lsx", lsx_vinsgr2vr_h, simd_insert, m128i, i16x8, i32, 3); -impl_vugv!("lsx", lsx_vinsgr2vr_w, simd_insert, m128i, i32x4, i32, 2); -impl_vugv!("lsx", 
lsx_vinsgr2vr_d, simd_insert, m128i, i64x2, i64, 1); +impl_vugv!("lsx", lsx_vinsgr2vr_b, is::simd_insert, m128i, i8x16, i32, 4); +impl_vugv!("lsx", lsx_vinsgr2vr_h, is::simd_insert, m128i, i16x8, i32, 3); +impl_vugv!("lsx", lsx_vinsgr2vr_w, is::simd_insert, m128i, i32x4, i32, 2); +impl_vugv!("lsx", lsx_vinsgr2vr_d, is::simd_insert, m128i, i64x2, i64, 1); diff --git a/library/stdarch/crates/core_arch/src/loongarch64/simd.rs b/library/stdarch/crates/core_arch/src/loongarch64/simd.rs index ac98d6ac5dcda..2dedfa7eb90e0 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/simd.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/simd.rs @@ -1,127 +1,126 @@ //! LoongArch64 SIMD helpers -pub(super) const trait SimdL: Sized { +use self as ls; +use crate::intrinsics::simd as is; + +// Internal extension trait for concrete `Simd` types. +// +// Provides a small set of helper functionality (`Elem` and `splat`) +// so generic and macro-based code can operate on different SIMD +// vector types in a uniform way. +pub(super) const trait SimdExt: Sized { type Elem; unsafe fn splat(v: i64) -> Self; } -macro_rules! impl_simdl { +macro_rules! 
impl_simd_ext { ($v:ident, $e:ty) => { #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] - impl const SimdL for crate::core_arch::simd::$v { + impl const SimdExt for crate::core_arch::simd::$v { type Elem = $e; #[inline(always)] unsafe fn splat(v: i64) -> Self { - crate::intrinsics::simd::simd_splat(v as Self::Elem) + is::simd_splat(v as Self::Elem) } } }; } -impl_simdl!(i8x16, i8); -impl_simdl!(i8x32, i8); -impl_simdl!(u8x16, u8); -impl_simdl!(u8x32, u8); -impl_simdl!(i16x8, i16); -impl_simdl!(i16x16, i16); -impl_simdl!(u16x8, u16); -impl_simdl!(u16x16, u16); -impl_simdl!(i32x4, i32); -impl_simdl!(i32x8, i32); -impl_simdl!(u32x4, u32); -impl_simdl!(u32x8, u32); -impl_simdl!(i64x2, i64); -impl_simdl!(i64x4, i64); -impl_simdl!(u64x2, u64); -impl_simdl!(u64x4, u64); +impl_simd_ext!(i8x16, i8); +impl_simd_ext!(i8x32, i8); +impl_simd_ext!(u8x16, u8); +impl_simd_ext!(u8x32, u8); +impl_simd_ext!(i16x8, i16); +impl_simd_ext!(i16x16, i16); +impl_simd_ext!(u16x8, u16); +impl_simd_ext!(u16x16, u16); +impl_simd_ext!(i32x4, i32); +impl_simd_ext!(i32x8, i32); +impl_simd_ext!(u32x4, u32); +impl_simd_ext!(u32x8, u32); +impl_simd_ext!(i64x2, i64); +impl_simd_ext!(i64x4, i64); +impl_simd_ext!(u64x2, u64); +impl_simd_ext!(u64x4, u64); #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_andn(a: T, b: T) -> T { - crate::intrinsics::simd::simd_and(simdl_not(a), b) +pub(super) const unsafe fn simd_andn(a: T, b: T) -> T { + is::simd_and(ls::simd_not(a), b) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_fms(a: T, b: T, c: T) -> T { - let c: T = crate::intrinsics::simd::simd_neg(c); - crate::intrinsics::simd::simd_fma(a, b, c) +pub(super) const unsafe fn simd_fmsub(a: T, b: T, c: T) -> T { + is::simd_fma(a, b, is::simd_neg(c)) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = 
"none")] -pub(super) const unsafe fn simdl_madd(a: T, b: T, c: T) -> T { - let mul: T = crate::intrinsics::simd::simd_mul(b, c); - crate::intrinsics::simd::simd_add(mul, a) +pub(super) const unsafe fn simd_fnmadd(a: T, b: T, c: T) -> T { + is::simd_neg(is::simd_fma(a, b, c)) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_msub(a: T, b: T, c: T) -> T { - let mul: T = crate::intrinsics::simd::simd_mul(b, c); - crate::intrinsics::simd::simd_sub(a, mul) +pub(super) const unsafe fn simd_fnmsub(a: T, b: T, c: T) -> T { + is::simd_neg(ls::simd_fmsub(a, b, c)) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_nfma(a: T, b: T, c: T) -> T { - let fma: T = crate::intrinsics::simd::simd_fma(a, b, c); - crate::intrinsics::simd::simd_neg(fma) +pub(super) const unsafe fn simd_madd(a: T, b: T, c: T) -> T { + is::simd_add(a, is::simd_mul(b, c)) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_nfms(a: T, b: T, c: T) -> T { - let fma: T = simdl_fms(a, b, c); - crate::intrinsics::simd::simd_neg(fma) +pub(super) const unsafe fn simd_msub(a: T, b: T, c: T) -> T { + is::simd_sub(a, is::simd_mul(b, c)) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_nor(a: T, b: T) -> T { - let or: T = crate::intrinsics::simd::simd_or(a, b); - simdl_not(or) +pub(super) const unsafe fn simd_nor(a: T, b: T) -> T { + ls::simd_not(is::simd_or(a, b)) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_not(a: T) -> T { - let not: T = simdl_splat(!0); - crate::intrinsics::simd::simd_xor(a, not) +pub(super) const unsafe fn simd_not(a: T) -> T { + is::simd_xor(a, ls::simd_splat(!0)) } #[inline(always)] 
#[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_orn(a: T, b: T) -> T { - crate::intrinsics::simd::simd_or(a, simdl_not(b)) +pub(super) const unsafe fn simd_orn(a: T, b: T) -> T { + is::simd_or(a, ls::simd_not(b)) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_shl(a: T, b: T) -> T { - let m: T = simdl_splat((size_of::() * 8 - 1) as i64); - let b: T = crate::intrinsics::simd::simd_and(b, m); - crate::intrinsics::simd::simd_shl(a, b) +pub(super) const unsafe fn simd_shl(a: T, b: T) -> T { + let m = (size_of::() * 8 - 1) as i64; + is::simd_shl(a, is::simd_and(b, ls::simd_splat(m))) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_shr(a: T, b: T) -> T { - let m: T = simdl_splat((size_of::() * 8 - 1) as i64); - let b: T = crate::intrinsics::simd::simd_and(b, m); - crate::intrinsics::simd::simd_shr(a, b) +pub(super) const unsafe fn simd_shr(a: T, b: T) -> T { + let m = (size_of::() * 8 - 1) as i64; + is::simd_shr(a, is::simd_and(b, ls::simd_splat(m))) } #[inline(always)] #[rustc_const_unstable(feature = "stdarch_const_helpers", issue = "none")] -pub(super) const unsafe fn simdl_splat(a: i64) -> T { +pub(super) const unsafe fn simd_splat(a: i64) -> T { T::splat(a) } macro_rules! impl_vv { - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ty) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ty) => { #[inline(always)] #[target_feature(enable = $ft)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -138,7 +137,7 @@ macro_rules! impl_vv { pub(super) use impl_vv; macro_rules! 
impl_gv { - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $gty:ty) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $gty:ty) => { #[inline(always)] #[target_feature(enable = $ft)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -154,7 +153,7 @@ macro_rules! impl_gv { pub(super) use impl_gv; macro_rules! impl_sv { - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $ibs:expr) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $ibs:expr) => { #[inline(always)] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(0)] @@ -172,7 +171,7 @@ macro_rules! impl_sv { pub(super) use impl_sv; macro_rules! impl_vvv { - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ty) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ty) => { #[inline(always)] #[target_feature(enable = $ft)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -190,22 +189,22 @@ macro_rules! impl_vvv { pub(super) use impl_vvv; macro_rules! impl_vuv { - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident) => { #[inline(always)] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn $name(a: $oty) -> $oty { - static_assert_uimm_bits!(IMM, (size_of::<<$ity as SimdL>::Elem>() * 8).ilog2()); + static_assert_uimm_bits!(IMM, (size_of::<<$ity as SimdExt>::Elem>() * 8).ilog2()); unsafe { let a: $ity = transmute(a); - let b: $ity = simdl_splat(IMM.into()); + let b: $ity = ls::simd_splat(IMM.into()); let r: $ity = $op(a, b); transmute(r) } } }; - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $ibs:expr) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $ibs:expr) => { #[inline(always)] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] @@ -214,7 +213,7 @@ macro_rules! 
impl_vuv { static_assert_uimm_bits!(IMM, $ibs); unsafe { let a: $ity = transmute(a); - let b: $ity = simdl_splat(IMM.into()); + let b: $ity = ls::simd_splat(IMM.into()); let r: $ity = $op(a, b); transmute(r) } @@ -225,7 +224,7 @@ macro_rules! impl_vuv { pub(super) use impl_vuv; macro_rules! impl_vug { - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $gty:ty, $ibs:expr) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $gty:ty, $ibs:expr) => { #[inline(always)] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] @@ -234,7 +233,7 @@ macro_rules! impl_vug { static_assert_uimm_bits!(IMM, $ibs); unsafe { let a: $ity = transmute(a); - let r: <$ity as SimdL>::Elem = $op(a, IMM); + let r: <$ity as SimdExt>::Elem = $op(a, IMM); r as $gty } } @@ -244,7 +243,7 @@ macro_rules! impl_vug { pub(super) use impl_vug; macro_rules! impl_vsv { - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $ibs:expr) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $ibs:expr) => { #[inline(always)] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] @@ -253,7 +252,7 @@ macro_rules! impl_vsv { static_assert_simm_bits!(IMM, $ibs); unsafe { let a: $ity = transmute(a); - let b: $ity = simdl_splat(IMM.into()); + let b: $ity = ls::simd_splat(IMM.into()); let r: $ity = $op(a, b); transmute(r) } @@ -264,7 +263,7 @@ macro_rules! impl_vsv { pub(super) use impl_vsv; macro_rules! impl_vvvv { - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ty) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ty) => { #[inline(always)] #[target_feature(enable = $ft)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -283,7 +282,7 @@ macro_rules! impl_vvvv { pub(super) use impl_vvvv; macro_rules! 
impl_vugv { - ($ft:literal, $name:ident, $op:ident, $oty:ty, $ity:ident, $gty:ty, $ibs:expr) => { + ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $gty:ty, $ibs:expr) => { #[inline(always)] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] @@ -292,7 +291,7 @@ macro_rules! impl_vugv { static_assert_uimm_bits!(IMM, $ibs); unsafe { let a: $ity = transmute(a); - let r: $ity = $op(a, IMM, b as <$ity as SimdL>::Elem); + let r: $ity = $op(a, IMM, b as <$ity as SimdExt>::Elem); transmute(r) } } From e8b4682aa167e0f06c6cd4d75216e5f418eb9179 Mon Sep 17 00:00:00 2001 From: sayantn Date: Thu, 30 Apr 2026 08:39:24 +0530 Subject: [PATCH 20/30] Handle `Identifier` unsafety in stdarch-gen-arm --- .../crates/stdarch-gen-arm/src/big_endian.rs | 36 +++++++++---------- .../crates/stdarch-gen-arm/src/expression.rs | 13 +++++-- .../crates/stdarch-gen-arm/src/intrinsic.rs | 7 ++-- 3 files changed, 30 insertions(+), 26 deletions(-) diff --git a/library/stdarch/crates/stdarch-gen-arm/src/big_endian.rs b/library/stdarch/crates/stdarch-gen-arm/src/big_endian.rs index b982ff53ec3d2..f024ca074eac6 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/big_endian.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/big_endian.rs @@ -15,9 +15,9 @@ fn create_single_wild_string(name: &str) -> WildString { /// Creates an Identifier with name `name` with no wildcards. This, for example, /// can be used to create variables, function names or arbitrary input. Is is /// extremely flexible. -pub fn create_symbol_identifier(arbitrary_string: &str) -> Expression { +pub fn create_symbol_identifier(arbitrary_string: &str, kind: IdentifierType) -> Expression { let identifier_name = create_single_wild_string(arbitrary_string); - Expression::Identifier(identifier_name, IdentifierType::Symbol) + Expression::Identifier(identifier_name, kind) } /// To compose the simd_shuffle! 
call we need: @@ -101,7 +101,6 @@ pub fn make_variable_mutable(variable_name: &str, type_kind: &TypeKind) -> Expre fn create_shuffle_internal( variable_name: &String, type_kind: &TypeKind, - fmt_tuple: fn(variable_name: &String, idx: u32, array_lanes: &String) -> String, fmt: fn(variable_name: &String, type_kind: &TypeKind, array_lanes: &String) -> String, ) -> Option { let TypeKind::Vector(vector_type) = type_kind else { @@ -120,14 +119,21 @@ fn create_shuffle_internal( /* .idx = simd_shuffle!(.idx, .idx, []) */ for idx in 0..tuple_count { - let formatted = fmt_tuple(variable_name, idx, &array_lanes); + let formatted = + create_assigned_tuple_shuffle_call_fmt(variable_name, idx, &array_lanes); string_builder += formatted.as_str(); } - Some(create_symbol_identifier(&string_builder)) + Some(create_symbol_identifier( + &string_builder, + IdentifierType::UnsafeSymbol, + )) } else { /* Generate a list of shuffles for each tuple */ let expression = fmt(variable_name, type_kind, &array_lanes); - Some(create_symbol_identifier(&expression)) + Some(create_symbol_identifier( + &expression, + IdentifierType::UnsafeSymbol, + )) } } @@ -137,7 +143,7 @@ fn create_assigned_tuple_shuffle_call_fmt( array_lanes: &String, ) -> String { format!( - "{variable_name}.{idx} = unsafe {{ simd_shuffle!({variable_name}.{idx}, {variable_name}.{idx}, {array_lanes}) }};\n" + "{variable_name}.{idx} = simd_shuffle!({variable_name}.{idx}, {variable_name}.{idx}, {array_lanes});\n" ) } @@ -147,7 +153,7 @@ fn create_assigned_shuffle_call_fmt( array_lanes: &String, ) -> String { format!( - "let {variable_name}: {type_kind} = unsafe {{ simd_shuffle!({variable_name}, {variable_name}, {array_lanes}) }}" + "let {variable_name}: {type_kind} = simd_shuffle!({variable_name}, {variable_name}, {array_lanes})" ) } @@ -165,20 +171,10 @@ pub fn create_assigned_shuffle_call( variable_name: &String, type_kind: &TypeKind, ) -> Option { - create_shuffle_internal( - variable_name, - type_kind, - 
create_assigned_tuple_shuffle_call_fmt, - create_assigned_shuffle_call_fmt, - ) + create_shuffle_internal(variable_name, type_kind, create_assigned_shuffle_call_fmt) } /// Create a `simd_shuffle!(<...>, [...])` call pub fn create_shuffle_call(variable_name: &String, type_kind: &TypeKind) -> Option { - create_shuffle_internal( - variable_name, - type_kind, - create_assigned_tuple_shuffle_call_fmt, - create_shuffle_call_fmt, - ) + create_shuffle_internal(variable_name, type_kind, create_shuffle_call_fmt) } diff --git a/library/stdarch/crates/stdarch-gen-arm/src/expression.rs b/library/stdarch/crates/stdarch-gen-arm/src/expression.rs index 0b6ffef9d8d3c..daaf7ee6897bd 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/expression.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/expression.rs @@ -23,6 +23,7 @@ use crate::{ pub enum IdentifierType { Variable, Symbol, + UnsafeSymbol, } #[derive(Debug, Clone, Serialize, Deserialize)] @@ -65,7 +66,11 @@ impl FnCall { } pub fn is_expected_call(&self, fn_call_name: &str) -> bool { - if let Expression::Identifier(fn_name, IdentifierType::Symbol) = self.0.as_ref() { + if let Expression::Identifier( + fn_name, + IdentifierType::Symbol | IdentifierType::UnsafeSymbol, + ) = self.0.as_ref() + { fn_name.to_string() == fn_call_name } else { false @@ -298,13 +303,15 @@ impl Expression { match self { // The call will need to be unsafe, but the declaration does not. Self::LLVMLink(..) => false, - // Identifiers, literals and type names are never unsafe. - Self::Identifier(..) => false, + // literals and type names are never unsafe. Self::IntConstant(..) => false, Self::FloatConstant(..) => false, Self::BoolConstant(..) => false, Self::Type(..) => false, Self::ConvertConst(..) => false, + // Only unsafe `Symbol` identifiers are unsafe + Self::Identifier(_, IdentifierType::UnsafeSymbol) => true, + Self::Identifier(..) 
=> false, // Nested structures that aren't inherently unsafe, but could contain other expressions // that might be. Self::Assign(_var, exp) => exp.requires_unsafe_wrapper(ctx_fn), diff --git a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs index 612c913d26d79..ab80b499b05f0 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs @@ -1191,9 +1191,10 @@ impl Intrinsic { * re-assigning each tuple however those generated calls do * not make the parent function return. So we add the return * value here */ - variant - .big_endian_compose - .push(create_symbol_identifier(&ret_val_name)); + variant.big_endian_compose.push(create_symbol_identifier( + &ret_val_name, + IdentifierType::Symbol, + )); } } } From 3bb09d4ec57cc93ceff7d2fe51da2b06952bd925 Mon Sep 17 00:00:00 2001 From: sayantn Date: Thu, 30 Apr 2026 08:39:51 +0530 Subject: [PATCH 21/30] Remove all unneeded `unsafe` from stdarch-gen-arm --- .../core_arch/src/aarch64/neon/generated.rs | 264 +-- .../src/arm_shared/neon/generated.rs | 1470 +++++++++-------- .../spec/neon/aarch64.spec.yml | 120 +- .../spec/neon/arm_shared.spec.yml | 82 +- 4 files changed, 1005 insertions(+), 931 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index 9c27dbdacf58f..fd7a04146bac0 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -4299,8 +4299,8 @@ pub fn vcopy_laneq_f32( ) -> float32x2_t { static_assert_uimm_bits!(LANE1, 1); static_assert_uimm_bits!(LANE2, 2); - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 
as u32]), @@ -4318,9 +4318,9 @@ pub fn vcopy_laneq_f32( pub fn vcopy_laneq_s8(a: int8x8_t, b: int8x16_t) -> int8x8_t { static_assert_uimm_bits!(LANE1, 3); static_assert_uimm_bits!(LANE2, 4); - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -4347,8 +4347,8 @@ pub fn vcopy_laneq_s16( ) -> int16x4_t { static_assert_uimm_bits!(LANE1, 2); static_assert_uimm_bits!(LANE2, 3); - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), @@ -4371,8 +4371,8 @@ pub fn vcopy_laneq_s32( ) -> int32x2_t { static_assert_uimm_bits!(LANE1, 1); static_assert_uimm_bits!(LANE2, 2); - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), @@ -4393,9 +4393,9 @@ pub fn vcopy_laneq_u8( ) -> uint8x8_t { static_assert_uimm_bits!(LANE1, 3); static_assert_uimm_bits!(LANE2, 4); - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -4422,8 +4422,8 @@ pub fn vcopy_laneq_u16( ) -> uint16x4_t { static_assert_uimm_bits!(LANE1, 2); 
static_assert_uimm_bits!(LANE2, 3); - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), @@ -4446,8 +4446,8 @@ pub fn vcopy_laneq_u32( ) -> uint32x2_t { static_assert_uimm_bits!(LANE1, 1); static_assert_uimm_bits!(LANE2, 2); - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), @@ -4468,9 +4468,9 @@ pub fn vcopy_laneq_p8( ) -> poly8x8_t { static_assert_uimm_bits!(LANE1, 3); static_assert_uimm_bits!(LANE2, 4); - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -4497,8 +4497,8 @@ pub fn vcopy_laneq_p16( ) -> poly16x4_t { static_assert_uimm_bits!(LANE1, 2); static_assert_uimm_bits!(LANE2, 3); - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), @@ -4521,8 +4521,8 @@ pub fn vcopyq_lane_f32( ) -> float32x4_t { static_assert_uimm_bits!(LANE1, 2); static_assert_uimm_bits!(LANE2, 1); - let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) }; unsafe { + let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 
3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -4545,8 +4545,8 @@ pub fn vcopyq_lane_f64( ) -> float64x2_t { static_assert_uimm_bits!(LANE1, 1); static_assert!(LANE2 == 0); - let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) }; unsafe { + let b: float64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -4567,8 +4567,8 @@ pub fn vcopyq_lane_s64( ) -> int64x2_t { static_assert_uimm_bits!(LANE1, 1); static_assert!(LANE2 == 0); - let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) }; unsafe { + let b: int64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -4589,8 +4589,8 @@ pub fn vcopyq_lane_u64( ) -> uint64x2_t { static_assert_uimm_bits!(LANE1, 1); static_assert!(LANE2 == 0); - let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) }; unsafe { + let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -4611,8 +4611,8 @@ pub fn vcopyq_lane_p64( ) -> poly64x2_t { static_assert_uimm_bits!(LANE1, 1); static_assert!(LANE2 == 0); - let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) }; unsafe { + let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]); match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), @@ -4630,9 +4630,9 @@ pub fn vcopyq_lane_p64( pub fn vcopyq_lane_s8(a: int8x16_t, b: int8x8_t) -> int8x16_t { static_assert_uimm_bits!(LANE1, 4); static_assert_uimm_bits!(LANE2, 3); - let b: int8x16_t = - unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }; unsafe { + let b: int8x16_t = + simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!( a, @@ -5003,8 +5003,8 
@@ pub fn vcopyq_lane_s16( ) -> int16x8_t { static_assert_uimm_bits!(LANE1, 3); static_assert_uimm_bits!(LANE2, 2); - let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) }; unsafe { + let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -5031,8 +5031,8 @@ pub fn vcopyq_lane_s32( ) -> int32x4_t { static_assert_uimm_bits!(LANE1, 2); static_assert_uimm_bits!(LANE2, 1); - let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) }; unsafe { + let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -5055,9 +5055,9 @@ pub fn vcopyq_lane_u8( ) -> uint8x16_t { static_assert_uimm_bits!(LANE1, 4); static_assert_uimm_bits!(LANE2, 3); - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }; unsafe { + let b: uint8x16_t = + simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!( a, @@ -5428,8 +5428,8 @@ pub fn vcopyq_lane_u16( ) -> uint16x8_t { static_assert_uimm_bits!(LANE1, 3); static_assert_uimm_bits!(LANE2, 2); - let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) }; unsafe { + let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -5456,8 +5456,8 @@ pub fn vcopyq_lane_u32( ) -> uint32x4_t { static_assert_uimm_bits!(LANE1, 2); static_assert_uimm_bits!(LANE2, 1); - let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) }; unsafe { + let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]); match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + 
LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), @@ -5480,9 +5480,9 @@ pub fn vcopyq_lane_p8( ) -> poly8x16_t { static_assert_uimm_bits!(LANE1, 4); static_assert_uimm_bits!(LANE2, 3); - let b: poly8x16_t = - unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) }; unsafe { + let b: poly8x16_t = + simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]); match LANE1 & 0b1111 { 0 => simd_shuffle!( a, @@ -5853,8 +5853,8 @@ pub fn vcopyq_lane_p16( ) -> poly16x8_t { static_assert_uimm_bits!(LANE1, 3); static_assert_uimm_bits!(LANE2, 2); - let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) }; unsafe { + let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]); match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), @@ -11701,8 +11701,8 @@ pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { let mut ret_val: poly64x2x2_t = transmute(vld2q_dup_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -11728,8 +11728,8 @@ pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { #[cfg_attr(test, assert_instr(ld2r))] pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { let mut ret_val: uint64x2x2_t = transmute(vld2q_dup_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); 
+ ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } #[doc = "Load multiple 2-element structures to two registers"] @@ -11904,8 +11904,8 @@ pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { #[cfg_attr(test, assert_instr(ld2))] pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { let mut ret_val: poly64x2x2_t = transmute(vld2q_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } #[doc = "Load multiple 2-element structures to two registers"] @@ -12085,9 +12085,9 @@ pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { let mut ret_val: poly64x2x3_t = transmute(vld3q_dup_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -12113,9 +12113,9 @@ pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { #[cfg_attr(test, assert_instr(ld3r))] pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { let mut ret_val: uint64x2x3_t = transmute(vld3q_dup_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = 
simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } #[doc = "Load multiple 3-element structures to three registers"] @@ -12293,9 +12293,9 @@ pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { #[cfg_attr(test, assert_instr(ld3))] pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { let mut ret_val: poly64x2x3_t = transmute(vld3q_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } #[doc = "Load multiple 3-element structures to three registers"] @@ -12477,10 +12477,10 @@ pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { let mut ret_val: poly64x2x4_t = transmute(vld4q_dup_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -12506,10 +12506,10 @@ pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { let mut ret_val: 
uint64x2x4_t = transmute(vld4q_dup_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } #[doc = "Load multiple 4-element structures to four registers"] @@ -12690,10 +12690,10 @@ pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { #[cfg_attr(test, assert_instr(ld4))] pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { let mut ret_val: poly64x2x4_t = transmute(vld4q_s64(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } #[doc = "Load multiple 4-element structures to four registers"] @@ -19951,8 +19951,8 @@ pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vrbit_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -19975,9 +19975,9 @@ pub fn 
vrbitq_u8(a: uint8x16_t) -> uint8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(vrbitq_s8(transmute(a))); simd_shuffle!( ret_val, @@ -20004,8 +20004,8 @@ pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vrbit_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -20028,9 +20028,9 @@ pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(vrbitq_s8(transmute(a))); simd_shuffle!( ret_val, @@ -20270,8 +20270,10 @@ pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t { #[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"] @@ -20293,8 +20295,8 @@ pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t { #[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -20344,8 +20346,8 @@ pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t { #[cfg(not(target_arch = "arm64ec"))] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -20391,8 +20393,10 @@ pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] @@ -20412,8 +20416,10 @@ pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + 
transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] @@ -20433,8 +20439,8 @@ pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -20457,8 +20463,8 @@ pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -20715,8 +20721,10 @@ pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] @@ -20736,8 +20744,8 @@ pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let 
a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -20760,8 +20768,8 @@ pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -20788,8 +20796,8 @@ pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -20812,8 +20820,8 @@ pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -20836,8 +20844,8 @@ pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -20860,8 +20868,8 @@ pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -20888,8 +20896,8 @@ pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -20912,8 +20920,8 @@ pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -20936,8 +20944,8 @@ pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -20960,8 +20968,8 @@ pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 
0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -20988,8 +20996,8 @@ pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -21012,8 +21020,8 @@ pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { - let a: float64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21036,8 +21044,10 @@ pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] @@ -21057,9 +21067,9 @@ pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe 
{ + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21082,8 +21092,10 @@ pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] @@ -21103,8 +21115,8 @@ pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21127,8 +21139,10 @@ pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] @@ -21148,8 +21162,8 @@ pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, 
assert_instr(nop))] pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21190,8 +21204,8 @@ pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21214,8 +21228,8 @@ pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21238,8 +21252,10 @@ pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] @@ -21259,9 +21275,9 @@ pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_u8(a: 
uint8x16_t) -> float64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21284,8 +21300,10 @@ pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] @@ -21305,8 +21323,8 @@ pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21329,8 +21347,10 @@ pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] @@ -21350,8 
+21370,8 @@ pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21392,8 +21412,8 @@ pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21416,8 +21436,8 @@ pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21440,8 +21460,10 @@ pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] @@ -21461,9 +21483,9 @@ pub fn vreinterpretq_f64_p8(a: 
poly8x16_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21486,8 +21508,10 @@ pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] @@ -21507,8 +21531,8 @@ pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21581,8 +21605,8 @@ pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 
2, 1, 0]) } @@ -21605,8 +21629,8 @@ pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21629,8 +21653,8 @@ pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -21653,8 +21677,8 @@ pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index d31451c4a7892..28a978fdbbc3e 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -7309,8 +7309,8 @@ pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let 
ret_val: uint16x4_t = transmute(vclz_s16(transmute(a))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -7357,8 +7357,8 @@ pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x8_t = transmute(vclzq_s16(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -7405,8 +7405,8 @@ pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint32x2_t = transmute(vclz_s32(transmute(a))); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -7453,8 +7453,8 @@ pub fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(vclzq_s32(transmute(a))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -7501,8 +7501,8 @@ pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vclz_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -7549,9 +7549,9 @@ pub fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue 
= "111800") )] pub fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(vclzq_s8(transmute(a))); simd_shuffle!( ret_val, @@ -7644,8 +7644,8 @@ pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vcnt_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -7692,9 +7692,9 @@ pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(vcntq_s8(transmute(a))); simd_shuffle!( ret_val, @@ -7745,8 +7745,8 @@ pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vcnt_s8(transmute(a))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -7793,9 +7793,9 @@ pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { - let a: poly8x16_t = - unsafe { 
simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(vcntq_s8(transmute(a))); simd_shuffle!( ret_val, @@ -9620,10 +9620,10 @@ pub fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> )] pub fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: int8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: int8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let c: int32x2_t = transmute(c); let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); let ret_val: int32x2_t = vdot_s32(a, b, transmute(c)); @@ -9681,11 +9681,11 @@ pub fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int8x8_t) )] pub fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int8x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: int8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: int8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let c: int32x2_t = transmute(c); let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -9743,10 +9743,10 @@ pub fn vdot_lane_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) )] pub fn vdot_lane_u32(a: uint32x2_t, b: 
uint8x8_t, c: uint8x8_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 1); - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let c: uint32x2_t = transmute(c); let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); let ret_val: uint32x2_t = vdot_u32(a, b, transmute(c)); @@ -9804,11 +9804,11 @@ pub fn vdotq_lane_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x8_ )] pub fn vdotq_lane_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x8_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 1); - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let c: uint32x2_t = transmute(c); let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -9852,11 +9852,11 @@ pub fn vdot_laneq_s32(a: int32x2_t, b: int8x8_t, c: int8x16_t) #[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] pub fn vdot_laneq_s32(a: int32x2_t, b: int8x8_t, c: int8x16_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: int8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: int8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; 
unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let c: int32x4_t = transmute(c); let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); let ret_val: int32x2_t = vdot_s32(a, b, transmute(c)); @@ -9900,12 +9900,12 @@ pub fn vdotq_laneq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t #[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] pub fn vdotq_laneq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: int8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: int8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let c: int32x4_t = transmute(c); let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -9949,11 +9949,11 @@ pub fn vdot_laneq_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x16_ #[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] pub fn vdot_laneq_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x16_t) -> uint32x2_t { static_assert_uimm_bits!(LANE, 2); - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x16_t 
= + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let c: uint32x4_t = transmute(c); let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); let ret_val: uint32x2_t = vdot_u32(a, b, transmute(c)); @@ -9997,12 +9997,12 @@ pub fn vdotq_laneq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x1 #[unstable(feature = "stdarch_neon_dotprod", issue = "117224")] pub fn vdotq_laneq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { static_assert_uimm_bits!(LANE, 2); - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let c: uint32x4_t = transmute(c); let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -19057,8 +19057,8 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { )] pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { let mut ret_val: uint8x8x2_t = transmute(vld2_dup_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -19108,20 +19108,16 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { )] pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { let mut 
ret_val: uint8x16x2_t = transmute(vld2q_dup_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -19171,8 +19167,8 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { )] pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { let mut ret_val: uint16x4x2_t = transmute(vld2_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -19222,8 +19218,8 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { )] pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { let mut ret_val: uint16x8x2_t = transmute(vld2q_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -19273,8 +19269,8 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { )] pub 
unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { let mut ret_val: uint32x2x2_t = transmute(vld2_dup_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -19324,8 +19320,8 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { )] pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { let mut ret_val: uint32x4x2_t = transmute(vld2q_dup_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -19375,8 +19371,8 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { )] pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { let mut ret_val: poly8x8x2_t = transmute(vld2_dup_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -19426,20 +19422,16 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { )] pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { let mut ret_val: poly8x16x2_t = transmute(vld2q_dup_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - 
ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -19489,8 +19481,8 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { )] pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { let mut ret_val: poly16x4x2_t = transmute(vld2_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -19540,8 +19532,8 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { )] pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { let mut ret_val: poly16x8x2_t = transmute(vld2q_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 2-element structure and replicate to all lanes of two registers"] @@ -21336,9 +21328,9 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { )] pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { let mut ret_val: uint8x8x3_t = transmute(vld3_dup_s8(transmute(a))); - 
ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -21388,27 +21380,21 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { )] pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { let mut ret_val: uint8x16x3_t = transmute(vld3q_dup_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -21458,9 +21444,9 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { )] pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { let mut ret_val: uint16x4x3_t = transmute(vld3_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, 
ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -21510,9 +21496,9 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { )] pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { let mut ret_val: uint16x8x3_t = transmute(vld3q_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -21562,9 +21548,9 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { )] pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { let mut ret_val: uint32x2x3_t = transmute(vld3_dup_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -21614,9 +21600,9 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t 
{ )] pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { let mut ret_val: uint32x4x3_t = transmute(vld3q_dup_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -21666,9 +21652,9 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { )] pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { let mut ret_val: poly8x8x3_t = transmute(vld3_dup_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -21718,27 +21704,21 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { )] pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { let mut ret_val: poly8x16x3_t = transmute(vld3q_dup_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, 
- ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -21788,9 +21768,9 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { )] pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { let mut ret_val: poly16x4x3_t = transmute(vld3_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); ret_val } #[doc = "Load single 3-element structure and replicate to all lanes of three registers"] @@ -21840,9 +21820,9 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { )] pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { let mut ret_val: poly16x8x3_t = transmute(vld3q_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } 
#[doc = "Load single 3-element structure and replicate to all lanes of two registers"] @@ -23617,10 +23597,10 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { )] pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { let mut ret_val: uint8x8x4_t = transmute(vld4_dup_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -23670,34 +23650,26 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { )] pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { let mut ret_val: uint8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.3 = unsafe { - simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 
10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -23747,10 +23719,10 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { )] pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { let mut ret_val: uint16x4x4_t = transmute(vld4_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -23800,10 +23772,10 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { )] pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { let mut ret_val: uint16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = 
simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -23853,10 +23825,10 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { )] pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { let mut ret_val: uint32x2x4_t = transmute(vld4_dup_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [1, 0]); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -23906,10 +23878,10 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { )] pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { let mut ret_val: uint32x4x4_t = transmute(vld4q_dup_s32(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -23959,10 +23931,10 @@ pub unsafe fn 
vld4_dup_p8(a: *const p8) -> poly8x8x4_t { )] pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { let mut ret_val: poly8x8x4_t = transmute(vld4_dup_s8(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -24012,34 +23984,26 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { )] pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { let mut ret_val: poly8x16x4_t = transmute(vld4q_dup_s8(transmute(a))); - ret_val.0 = unsafe { - simd_shuffle!( - ret_val.0, - ret_val.0, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.1 = unsafe { - simd_shuffle!( - ret_val.1, - ret_val.1, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.2 = unsafe { - simd_shuffle!( - ret_val.2, - ret_val.2, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; - ret_val.3 = unsafe { - simd_shuffle!( - ret_val.3, - ret_val.3, - [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] - ) - }; + ret_val.0 = simd_shuffle!( + ret_val.0, + ret_val.0, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.1 = simd_shuffle!( + ret_val.1, + ret_val.1, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.2 = simd_shuffle!( + ret_val.2, + ret_val.2, + [15, 14, 13, 12, 11, 10, 9, 8, 
7, 6, 5, 4, 3, 2, 1, 0] + ); + ret_val.3 = simd_shuffle!( + ret_val.3, + ret_val.3, + [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0] + ); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -24089,10 +24053,10 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { )] pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { let mut ret_val: poly16x4x4_t = transmute(vld4_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [3, 2, 1, 0]); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of four registers"] @@ -24142,10 +24106,10 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { )] pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { let mut ret_val: poly16x8x4_t = transmute(vld4q_dup_s16(transmute(a))); - ret_val.0 = unsafe { simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.1 = unsafe { simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.2 = unsafe { simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - ret_val.3 = unsafe { simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; + ret_val.0 = simd_shuffle!(ret_val.0, ret_val.0, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.1 = simd_shuffle!(ret_val.1, ret_val.1, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.2 = simd_shuffle!(ret_val.2, ret_val.2, [7, 6, 5, 4, 3, 2, 1, 0]); + ret_val.3 = simd_shuffle!(ret_val.3, ret_val.3, [7, 6, 5, 4, 3, 
2, 1, 0]); ret_val } #[doc = "Load single 4-element structure and replicate to all lanes of two registers"] @@ -39548,9 +39512,9 @@ pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vraddhn_s16(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -39597,9 +39561,9 @@ pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); let ret_val: uint16x4_t = transmute(vraddhn_s32(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -39646,9 +39610,9 @@ pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); let ret_val: uint32x2_t = transmute(vraddhn_s64(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -39991,8 +39955,8 @@ pub fn 
vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -40041,8 +40005,8 @@ pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -40091,8 +40055,8 @@ pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -40141,8 +40105,8 @@ pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -40191,8 +40155,10 @@ pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast 
operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] @@ -40238,8 +40204,8 @@ pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -40288,8 +40254,8 @@ pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -40338,8 +40304,8 @@ pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -40388,8 +40354,10 @@ pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] @@ -40435,8 +40403,8 @@ pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { )] 
#[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -40485,8 +40453,8 @@ pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -40535,8 +40503,8 @@ pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -40585,8 +40553,8 @@ pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -40639,8 +40607,8 @@ pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 
6, 5, 4, 3, 2, 1, 0]) } @@ -40689,8 +40657,8 @@ pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -40739,8 +40707,8 @@ pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -40789,8 +40757,8 @@ pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -40843,8 +40811,8 @@ pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -40893,8 +40861,8 @@ pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) 
}; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -40943,8 +40911,8 @@ pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -40993,8 +40961,8 @@ pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -41047,8 +41015,8 @@ pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41097,8 +41065,8 @@ pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -41147,8 +41115,8 @@ pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn 
vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41197,8 +41165,8 @@ pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -41247,9 +41215,9 @@ pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41298,8 +41266,8 @@ pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -41348,8 +41316,8 @@ pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float16x8_t = transmute(a); 
simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41398,8 +41366,8 @@ pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -41448,8 +41416,8 @@ pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41547,8 +41515,8 @@ pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41597,8 +41565,8 @@ pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -41647,9 +41615,9 @@ pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; 
unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41698,8 +41666,8 @@ pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -41748,8 +41716,8 @@ pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41798,8 +41766,8 @@ pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -41848,8 +41816,8 @@ pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41947,8 +41915,8 @@ pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { )] #[cfg(not(target_arch = 
"arm64ec"))] pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -41997,8 +41965,8 @@ pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -42047,9 +42015,9 @@ pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -42098,8 +42066,8 @@ pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -42148,8 +42116,8 @@ pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: 
float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -42247,8 +42215,10 @@ pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { - let a: float16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] @@ -42294,8 +42264,10 @@ pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] @@ -42341,8 +42313,8 @@ pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { - let a: float16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: float16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -42440,8 +42412,8 @@ pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { )] #[cfg(not(target_arch = "arm64ec"))] pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 
5, 4, 3, 2, 1, 0]) } @@ -42535,8 +42507,8 @@ pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -42583,8 +42555,8 @@ pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -42631,8 +42603,8 @@ pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -42679,8 +42651,10 @@ pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] @@ -42724,8 +42698,8 @@ pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") 
)] pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -42772,8 +42746,8 @@ pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -42820,8 +42794,8 @@ pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -42868,8 +42842,10 @@ pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] @@ -42913,8 +42889,8 @@ pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float32x2_t = 
simd_shuffle!(a, a, [1, 0]); let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -42961,8 +42937,8 @@ pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { - let a: float32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: float32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -43009,8 +42985,10 @@ pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] @@ -43054,8 +43032,8 @@ pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -43106,8 +43084,8 @@ pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -43154,8 +43132,8 
@@ pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -43202,8 +43180,8 @@ pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -43250,8 +43228,8 @@ pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -43302,8 +43280,8 @@ pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -43350,8 +43328,8 @@ pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) 
}; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -43398,8 +43376,8 @@ pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -43446,8 +43424,8 @@ pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -43498,8 +43476,8 @@ pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { - let a: float32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: float32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -43546,8 +43524,8 @@ pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -43594,8 +43572,8 @@ pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { 
unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -43642,8 +43620,8 @@ pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -43690,8 +43668,10 @@ pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] @@ -43735,8 +43715,8 @@ pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -43783,8 +43763,8 @@ pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn 
vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -43831,8 +43811,8 @@ pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -43879,8 +43859,10 @@ pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] @@ -43924,8 +43906,8 @@ pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -43972,8 +43954,8 @@ pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { - let a: int8x8_t = 
unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -44020,9 +44002,9 @@ pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -44069,9 +44051,9 @@ pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -44118,9 +44100,9 @@ pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -44167,9 +44149,9 @@ pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { - 
let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -44216,9 +44198,9 @@ pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -44269,9 +44251,9 @@ pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -44318,9 +44300,9 @@ pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -44367,9 +44349,9 @@ pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub 
fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -44416,9 +44398,9 @@ pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -44469,9 +44451,9 @@ pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -44518,8 +44500,8 @@ pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -44566,8 +44548,8 @@ pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s8_s16(a: int16x4_t) 
-> int8x8_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -44614,8 +44596,8 @@ pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -44662,8 +44644,10 @@ pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] @@ -44707,8 +44691,8 @@ pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -44755,8 +44739,8 @@ pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 
0]); let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -44803,8 +44787,8 @@ pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -44851,8 +44835,10 @@ pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] @@ -44896,8 +44882,8 @@ pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -44944,8 +44930,8 @@ pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -44992,8 +44978,8 @@ pub fn 
vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -45040,8 +45026,8 @@ pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -45092,8 +45078,8 @@ pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -45140,8 +45126,8 @@ pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -45188,8 +45174,8 @@ pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { - let a: int16x8_t = unsafe { 
simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -45240,8 +45226,8 @@ pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -45288,8 +45274,8 @@ pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -45336,8 +45322,8 @@ pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -45384,8 +45370,8 @@ pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, 
@@ -45436,8 +45422,8 @@ pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -45484,8 +45470,8 @@ pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -45532,8 +45518,8 @@ pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -45580,8 +45566,8 @@ pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -45628,8 +45614,10 @@ pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - 
unsafe { transmute(a) } + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] @@ -45673,8 +45661,8 @@ pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -45721,8 +45709,8 @@ pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -45769,8 +45757,8 @@ pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -45817,8 +45805,10 @@ pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] @@ -45862,8 +45852,8 @@ pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -45910,8 +45900,8 @@ pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -45958,8 +45948,8 @@ pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -46006,8 +45996,8 @@ pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -46058,8 +46048,8 @@ pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t 
{ - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -46106,8 +46096,8 @@ pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -46154,8 +46144,8 @@ pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -46206,8 +46196,8 @@ pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -46254,8 +46244,8 @@ pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -46302,8 +46292,8 @@ pub fn vreinterpretq_u64_s32(a: 
int32x4_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -46350,8 +46340,8 @@ pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -46402,8 +46392,8 @@ pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -46894,8 +46884,8 @@ pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -46942,8 +46932,8 @@ pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int8x16_t 
= transmute(a); simd_shuffle!( ret_val, @@ -46994,8 +46984,8 @@ pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -47042,8 +47032,8 @@ pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -47090,8 +47080,8 @@ pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -47142,8 +47132,8 @@ pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -47190,8 +47180,8 @@ pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { 
+ let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -47238,8 +47228,8 @@ pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -47286,8 +47276,8 @@ pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -47338,8 +47328,8 @@ pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { - let a: int64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -47386,8 +47376,8 @@ pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -47434,8 +47424,8 @@ pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn 
vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -47482,8 +47472,8 @@ pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -47530,8 +47520,8 @@ pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -47578,8 +47568,10 @@ pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] @@ -47623,8 +47615,8 @@ pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { - let a: 
uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -47671,8 +47663,8 @@ pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -47719,8 +47711,10 @@ pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] @@ -47764,8 +47758,8 @@ pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -47812,8 +47806,8 @@ pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 
2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -47860,9 +47854,9 @@ pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -47909,9 +47903,9 @@ pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -47962,9 +47956,9 @@ pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -48011,9 +48005,9 @@ pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 
14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -48060,9 +48054,9 @@ pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -48109,9 +48103,9 @@ pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -48158,9 +48152,9 @@ pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -48207,9 +48201,9 @@ pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn 
vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -48256,9 +48250,9 @@ pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -48309,9 +48303,9 @@ pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -48358,8 +48352,8 @@ pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -48406,8 +48400,8 @@ pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn 
vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -48454,8 +48448,8 @@ pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -48502,8 +48496,8 @@ pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -48550,8 +48544,10 @@ pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] @@ -48595,8 +48591,8 @@ pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: 
uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -48643,8 +48639,8 @@ pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -48691,8 +48687,10 @@ pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] @@ -48736,8 +48734,8 @@ pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -48784,8 +48782,8 @@ pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, 
ret_val, [3, 2, 1, 0]) } @@ -48832,8 +48830,8 @@ pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -48880,8 +48878,8 @@ pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -48932,8 +48930,8 @@ pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -48980,8 +48978,8 @@ pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -49028,8 +49026,8 @@ pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") 
)] pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -49076,8 +49074,8 @@ pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -49128,8 +49126,8 @@ pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -49176,8 +49174,8 @@ pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -49224,8 +49222,8 @@ pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 
6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -49276,8 +49274,8 @@ pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -49324,8 +49322,8 @@ pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -49372,8 +49370,8 @@ pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -49420,8 +49418,8 @@ pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -49468,8 +49466,8 @@ pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn 
vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -49516,8 +49514,10 @@ pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] @@ -49561,8 +49561,8 @@ pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -49609,8 +49609,8 @@ pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -49657,8 +49657,10 @@ pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint32x2_t = 
simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] @@ -49702,8 +49704,8 @@ pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -49750,8 +49752,8 @@ pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint32x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -49798,8 +49800,8 @@ pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -49846,8 +49848,8 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -49898,8 +49900,8 @@ pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t 
{ unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -49946,8 +49948,8 @@ pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -49994,8 +49996,8 @@ pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -50042,8 +50044,8 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -50094,8 +50096,8 @@ pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: 
uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -50142,8 +50144,8 @@ pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -50190,8 +50192,8 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -50242,8 +50244,8 @@ pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -50734,8 +50736,8 @@ pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -50782,8 +50784,8 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_u64(a: 
uint64x2_t) -> int8x16_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -50834,8 +50836,8 @@ pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -50882,8 +50884,8 @@ pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -50930,8 +50932,8 @@ pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -50978,8 +50980,8 @@ pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -51030,8 +51032,8 @@ pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -51078,8 +51080,8 @@ pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -51126,8 +51128,8 @@ pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -51178,8 +51180,8 @@ pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -51226,8 +51228,8 @@ pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x2_t = 
transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -51274,8 +51276,8 @@ pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -51322,8 +51324,8 @@ pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -51370,8 +51372,8 @@ pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -51418,8 +51420,10 @@ pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] @@ -51463,8 
+51467,8 @@ pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -51511,8 +51515,8 @@ pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -51559,8 +51563,8 @@ pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -51607,8 +51611,10 @@ pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] @@ -51652,8 +51658,8 @@ pub fn vreinterpret_p16_p8(a: poly8x8_t) -> 
poly16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -51700,9 +51706,9 @@ pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -51749,9 +51755,9 @@ pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -51802,9 +51808,9 @@ pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -51851,9 +51857,9 @@ pub fn vreinterpretq_s32_p8(a: 
poly8x16_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -51900,9 +51906,9 @@ pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -51949,9 +51955,9 @@ pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -52002,9 +52008,9 @@ pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 
2, 1, 0]) } @@ -52051,9 +52057,9 @@ pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -52100,9 +52106,9 @@ pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -52149,9 +52155,9 @@ pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -52198,8 +52204,8 @@ pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: float32x2_t = transmute(a); simd_shuffle!(ret_val, 
ret_val, [1, 0]) } @@ -52246,8 +52252,8 @@ pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -52294,8 +52300,8 @@ pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -52342,8 +52348,8 @@ pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: int32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -52390,8 +52396,10 @@ pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] @@ -52435,8 +52443,8 @@ pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -52483,8 +52491,8 @@ pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint16x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -52531,8 +52539,8 @@ pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: uint32x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -52579,8 +52587,10 @@ pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] @@ -52624,8 +52634,8 @@ pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { - let a: poly16x4_t 
= unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -52672,8 +52682,8 @@ pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: float32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -52720,8 +52730,8 @@ pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -52772,8 +52782,8 @@ pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -52820,8 +52830,8 @@ pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int32x4_t = transmute(a); 
simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -52868,8 +52878,8 @@ pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -52916,8 +52926,8 @@ pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -52968,8 +52978,8 @@ pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -53016,8 +53026,8 @@ pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -53064,8 +53074,8 @@ pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -53112,8 +53122,8 @@ pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -53693,8 +53703,10 @@ pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] @@ -53738,9 +53750,11 @@ pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] @@ -53784,9 +53798,9 @@ pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { - let a: int8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -53833,8 +53847,10 @@ pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { - let a: int16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] @@ -53878,8 +53894,10 @@ pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { - let a: int16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] @@ -53923,8 +53941,8 @@ pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { - let a: int16x8_t = 
unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -53971,8 +53989,10 @@ pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] @@ -54016,8 +54036,10 @@ pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] @@ -54061,8 +54083,8 @@ pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -54109,8 +54131,10 @@ pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { - let a: int64x2_t = unsafe { simd_shuffle!(a, 
a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: int64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] @@ -54154,8 +54178,10 @@ pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] @@ -54199,9 +54225,11 @@ pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] @@ -54245,9 +54273,9 @@ pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { - let a: uint8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); 
simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -54294,8 +54322,10 @@ pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { - let a: uint16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] @@ -54339,8 +54369,10 @@ pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] @@ -54384,8 +54416,8 @@ pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -54432,8 +54464,10 @@ pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { - let a: uint32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint32x2_t = simd_shuffle!(a, 
a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] @@ -54477,8 +54511,10 @@ pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] @@ -54522,8 +54558,8 @@ pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -54570,8 +54606,10 @@ pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] @@ -54615,8 +54653,10 @@ pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 
6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] @@ -54660,9 +54700,11 @@ pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] @@ -54706,9 +54748,9 @@ pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { - let a: poly8x16_t = - unsafe { simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x16_t = + simd_shuffle!(a, a, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -54755,8 +54797,10 @@ pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { - let a: poly16x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly16x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] @@ -54800,8 +54844,10 @@ pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] @@ -54845,8 +54891,8 @@ pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { - let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly64x2_t = transmute(a); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -55269,8 +55315,10 @@ pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - unsafe { transmute(a) } + unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); + transmute(a) + } } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] @@ -55314,8 +55362,8 @@ pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = 
simd_shuffle!(a, a, [1, 0]); let ret_val: int8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -55366,8 +55414,8 @@ pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -55414,8 +55462,8 @@ pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: int32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -55462,8 +55510,8 @@ pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -55514,8 +55562,8 @@ pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -55562,8 +55610,8 @@ pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { 
- let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: uint32x4_t = transmute(a); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -55610,8 +55658,8 @@ pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly8x16_t = transmute(a); simd_shuffle!( ret_val, @@ -55662,8 +55710,8 @@ pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { - let a: poly64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; unsafe { + let a: poly64x2_t = simd_shuffle!(a, a, [1, 0]); let ret_val: poly16x8_t = transmute(a); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -58727,9 +58775,9 @@ pub fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { - let a: uint16x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint16x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint16x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vrsubhn_s16(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -58776,9 +58824,9 @@ pub fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { - let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 
0]) }; - let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [3, 2, 1, 0]) }; unsafe { + let a: uint32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint32x4_t = simd_shuffle!(b, b, [3, 2, 1, 0]); let ret_val: uint16x4_t = transmute(vrsubhn_s32(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [3, 2, 1, 0]) } @@ -58825,9 +58873,9 @@ pub fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800") )] pub fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { - let a: uint64x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [1, 0]) }; unsafe { + let a: uint64x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint64x2_t = simd_shuffle!(b, b, [1, 0]); let ret_val: uint32x2_t = transmute(vrsubhn_s64(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [1, 0]) } @@ -70468,10 +70516,10 @@ pub fn vsudot_lane_s32(a: int32x2_t, b: int8x8_t, c: uint8x8_t) )] pub fn vsudot_lane_s32(a: int32x2_t, b: int8x8_t, c: uint8x8_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: int8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: int8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let c: uint32x2_t = transmute(c); let c: uint32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); let ret_val: int32x2_t = vusdot_s32(a, transmute(c), b); @@ -70529,11 +70577,11 @@ pub fn vsudotq_lane_s32(a: int32x4_t, b: int8x16_t, c: uint8x8_ )] pub fn vsudotq_lane_s32(a: int32x4_t, b: int8x16_t, c: uint8x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: int8x16_t = - unsafe { 
simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: int8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let c: uint32x2_t = transmute(c); let c: uint32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -70630,9 +70678,9 @@ pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vtbl1(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -70659,9 +70707,9 @@ pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbl))] pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vtbl1(transmute(a), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -70715,10 +70763,10 @@ pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(test, 
assert_instr(vtbl))] pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { let mut a: uint8x8x2_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -70746,10 +70794,10 @@ pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { #[cfg_attr(test, assert_instr(vtbl))] pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { let mut a: poly8x8x2_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vtbl2(transmute(a.0), transmute(a.1), transmute(b))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -70810,11 +70858,11 @@ pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(test, assert_instr(vtbl))] pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { let mut a: uint8x8x3_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + a.0 = simd_shuffle!(a.0, a.0, [7, 
6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vtbl3( transmute(a.0), transmute(a.1), @@ -70854,11 +70902,11 @@ pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { #[cfg_attr(test, assert_instr(vtbl))] pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { let mut a: poly8x8x3_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vtbl3( transmute(a.0), transmute(a.1), @@ -70925,12 +70973,12 @@ pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { #[cfg_attr(test, assert_instr(vtbl))] pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { let mut a: uint8x8x4_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); 
let ret_val: uint8x8_t = transmute(vtbl4( transmute(a.0), transmute(a.1), @@ -70972,12 +71020,12 @@ pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { #[cfg_attr(test, assert_instr(vtbl))] pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { let mut a: poly8x8x4_t = a; - a.0 = unsafe { simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.1 = unsafe { simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.2 = unsafe { simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - a.3 = unsafe { simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + a.0 = simd_shuffle!(a.0, a.0, [7, 6, 5, 4, 3, 2, 1, 0]); + a.1 = simd_shuffle!(a.1, a.1, [7, 6, 5, 4, 3, 2, 1, 0]); + a.2 = simd_shuffle!(a.2, a.2, [7, 6, 5, 4, 3, 2, 1, 0]); + a.3 = simd_shuffle!(a.3, a.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vtbl4( transmute(a.0), transmute(a.1), @@ -71032,10 +71080,10 @@ pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbx))] pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -71060,10 +71108,10 @@ pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> 
poly8x8_t { #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] #[cfg_attr(test, assert_instr(vtbx))] pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let b: poly8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + let b: poly8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vtbx1(transmute(a), transmute(b), transmute(c))); simd_shuffle!(ret_val, ret_val, [7, 6, 5, 4, 3, 2, 1, 0]) } @@ -71120,11 +71168,11 @@ pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(test, assert_instr(vtbx))] pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { let mut b: uint8x8x2_t = b; - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vtbx2( transmute(a), transmute(b.0), @@ -71162,11 +71210,11 @@ pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { #[cfg_attr(test, assert_instr(vtbx))] pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { let mut b: poly8x8x2_t = b; - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe 
{ simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vtbx2( transmute(a), transmute(b.0), @@ -71229,12 +71277,12 @@ pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(test, assert_instr(vtbx))] pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { let mut b: uint8x8x3_t = b; - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vtbx3( transmute(a), transmute(b.0), @@ -71274,12 +71322,12 @@ pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { #[cfg_attr(test, assert_instr(vtbx))] pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { let mut b: poly8x8x3_t = b; - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 
0]) }; - b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vtbx3( transmute(a), transmute(b.0), @@ -71341,13 +71389,13 @@ pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { #[cfg_attr(test, assert_instr(vtbx))] pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { let mut b: int8x8x4_t = b; - let a: int8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: int8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: int8x8_t = vtbx4( a, transmute(b.0), @@ -71389,13 +71437,13 @@ pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { #[cfg_attr(test, assert_instr(vtbx))] pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { let mut b: uint8x8x4_t = b; - let a: uint8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { 
simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: uint8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: uint8x8_t = transmute(vtbx4( transmute(a), transmute(b.0), @@ -71437,13 +71485,13 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { #[cfg_attr(test, assert_instr(vtbx))] pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { let mut b: poly8x8x4_t = b; - let a: poly8x8_t = unsafe { simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.0 = unsafe { simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.1 = unsafe { simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.2 = unsafe { simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]) }; - b.3 = unsafe { simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: uint8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: poly8x8_t = simd_shuffle!(a, a, [7, 6, 5, 4, 3, 2, 1, 0]); + b.0 = simd_shuffle!(b.0, b.0, [7, 6, 5, 4, 3, 2, 1, 0]); + b.1 = simd_shuffle!(b.1, b.1, [7, 6, 5, 4, 3, 2, 1, 0]); + b.2 = simd_shuffle!(b.2, b.2, [7, 6, 5, 4, 3, 2, 1, 0]); + b.3 = simd_shuffle!(b.3, b.3, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: uint8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let ret_val: poly8x8_t = transmute(vtbx4( transmute(a), transmute(b.0), @@ -72511,10 +72559,10 @@ pub fn vusdot_lane_s32(a: int32x2_t, b: 
uint8x8_t, c: int8x8_t) )] pub fn vusdot_lane_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 1); - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: int8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let c: int32x2_t = transmute(c); let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); let ret_val: int32x2_t = vusdot_s32(a, b, transmute(c)); @@ -72572,11 +72620,11 @@ pub fn vusdotq_lane_s32(a: int32x4_t, b: uint8x16_t, c: int8x8_ )] pub fn vusdotq_lane_s32(a: int32x4_t, b: uint8x16_t, c: int8x8_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 1); - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: int8x8_t = unsafe { simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x8_t = simd_shuffle!(c, c, [7, 6, 5, 4, 3, 2, 1, 0]); let c: int32x2_t = transmute(c); let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); @@ -72620,11 +72668,11 @@ pub fn vusdot_laneq_s32(a: int32x2_t, b: uint8x8_t, c: int8x16_ #[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] pub fn vusdot_laneq_s32(a: int32x2_t, b: uint8x8_t, c: int8x16_t) -> int32x2_t { static_assert_uimm_bits!(LANE, 2); - let a: int32x2_t = unsafe { simd_shuffle!(a, a, [1, 0]) }; - let b: uint8x8_t = unsafe { simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: int8x16_t = - unsafe { simd_shuffle!(c, c, 
[15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int32x2_t = simd_shuffle!(a, a, [1, 0]); + let b: uint8x8_t = simd_shuffle!(b, b, [7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let c: int32x4_t = transmute(c); let c: int32x2_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32]); let ret_val: int32x2_t = vusdot_s32(a, b, transmute(c)); @@ -72668,12 +72716,12 @@ pub fn vusdotq_laneq_s32(a: int32x4_t, b: uint8x16_t, c: int8x1 #[unstable(feature = "stdarch_neon_i8mm", issue = "117223")] pub fn vusdotq_laneq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { static_assert_uimm_bits!(LANE, 2); - let a: int32x4_t = unsafe { simd_shuffle!(a, a, [3, 2, 1, 0]) }; - let b: uint8x16_t = - unsafe { simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; - let c: int8x16_t = - unsafe { simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]) }; unsafe { + let a: int32x4_t = simd_shuffle!(a, a, [3, 2, 1, 0]); + let b: uint8x16_t = + simd_shuffle!(b, b, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); + let c: int8x16_t = + simd_shuffle!(c, c, [15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]); let c: int32x4_t = transmute(c); let c: int32x4_t = simd_shuffle!(c, c, [LANE as u32, LANE as u32, LANE as u32, LANE as u32]); diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml index d55ea0b770858..e37f9f124be4c 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/aarch64.spec.yml @@ -2670,11 +2670,11 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [poly64x2_t, ' static_assert_uimm_bits!(N, 1);', 'unsafe { match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), } }'] - 
- [float64x2_t, ' static_assert_uimm_bits!(N, 1);', 'unsafe { match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), } }'] + - [poly64x2_t, 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [float64x2_t, 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] compose: - - Identifier: ["{type[1]}", Symbol] - - Identifier: ["{type[2]}", Symbol] + - FnCall: [static_assert_uimm_bits!, [N, 1]] + - Identifier: ["{type[1]}", UnsafeSymbol] - name: "vmla{neon_type.no}" doc: "Floating-point multiply-add to accumulator" @@ -8944,19 +8944,19 @@ intrinsics: static_defs: ['const LANE1: i32, const LANE2: i32'] safety: safe types: - - [int8x8_t, int8x8_t, int8x8_t, '3', '3', ' unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [int16x4_t, int16x4_t, int16x4_t, '2', '2', ' unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [int32x2_t, int32x2_t, int32x2_t, '1', '1', ' unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => 
unreachable_unchecked(), } }'] - - [uint8x8_t, uint8x8_t, uint8x8_t, '3', '3', ' unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint16x4_t, uint16x4_t, uint16x4_t, '2', '2', ' unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint32x2_t, uint32x2_t, uint32x2_t, '1', '1', ' unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly8x8_t, poly8x8_t, poly8x8_t, '3', '3', ' unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly16x4_t, poly16x4_t, poly16x4_t, '2', '2', ' unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, 
[4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [float32x2_t, float32x2_t, float32x2_t, '1', '1', ' unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] + - [int8x8_t, int8x8_t, int8x8_t, '3', '3', ' match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int16x4_t, int16x4_t, int16x4_t, '2', '2', ' match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int32x2_t, int32x2_t, int32x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint8x8_t, uint8x8_t, uint8x8_t, '3', '3', ' match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + 
LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint16x4_t, uint16x4_t, uint16x4_t, '2', '2', ' match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint32x2_t, uint32x2_t, uint32x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly8x8_t, poly8x8_t, poly8x8_t, '3', '3', ' match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly16x4_t, poly16x4_t, poly16x4_t, '2', '2', ' match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [float32x2_t, float32x2_t, float32x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] compose: - FnCall: 
[static_assert_uimm_bits!, [LANE1, '{type[3]}']] - FnCall: [static_assert_uimm_bits!, [LANE2, '{type[4]}']] - - Identifier: ["{type[5]}", Symbol] + - Identifier: ["{type[5]}", UnsafeSymbol] - name: "vcopy{neon_type[0].lane_nox}" doc: "Insert vector element from another vector element" @@ -8969,19 +8969,19 @@ intrinsics: static_defs: ['const LANE1: i32, const LANE2: i32'] safety: safe types: - - [int8x16_t, int8x8_t, int8x16_t, '4', '3', ' let b: int8x16_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };', 'unsafe { match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 
5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [int16x8_t, int16x4_t, int16x8_t, '3', '2', ' let b: int16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };', 'unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [int32x4_t, int32x2_t, int32x4_t, '2', '1', ' let b: int32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };', 'unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint8x16_t, uint8x8_t, uint8x16_t, '4', '3', ' let b: uint8x16_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };', 'unsafe { match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 
13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint16x8_t, uint16x4_t, uint16x8_t, '3', '2', ' let b: uint16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };', 'unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint32x4_t, uint32x2_t, uint32x4_t, '2', '1', ' let b: uint32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };', 'unsafe { 
match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly8x16_t, poly8x8_t, poly8x16_t, '4', '3', ' let b: poly8x16_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };', 'unsafe { match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 
10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly16x8_t, poly16x4_t, poly16x8_t, '3', '2', ' let b: poly16x8_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]) };', 'unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] + - [int8x16_t, int8x8_t, int8x16_t, '4', '3', ' let b: int8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 
2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int16x8_t, int16x4_t, int16x8_t, '3', '2', ' let b: int16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int32x4_t, int32x2_t, int32x4_t, '2', '1', ' let b: int32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint8x16_t, uint8x8_t, uint8x16_t, '4', '3', ' let b: uint8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as 
u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint16x8_t, uint16x4_t, uint16x8_t, '3', '2', ' let b: uint16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 
6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint32x4_t, uint32x2_t, uint32x4_t, '2', '1', ' let b: uint32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly8x16_t, poly8x8_t, poly8x16_t, '4', '3', ' let b: poly8x16_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 
13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly16x8_t, poly16x4_t, poly16x8_t, '3', '2', ' let b: poly16x8_t = simd_shuffle!(b, b, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] compose: - FnCall: [static_assert_uimm_bits!, [LANE1, '{type[3]}']] - FnCall: [static_assert_uimm_bits!, [LANE2, '{type[4]}']] - - Identifier: ["{type[5]}", Symbol] - - Identifier: ["{type[6]}", Symbol] + - Identifier: ["{type[5]}", UnsafeSymbol] + - Identifier: ["{type[6]}", UnsafeSymbol] - name: "vcopy{neon_type[0].laneq_nox}" doc: "Insert vector element from another vector element" @@ -8994,23 +8994,23 @@ intrinsics: static_defs: ['const LANE1: i32, const LANE2: i32'] safety: safe types: - - [int8x16_t, int8x16_t, int8x16_t, '4', '4', ' unsafe { match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 
9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [int16x8_t, int16x8_t, int16x8_t, '3', '3', ' unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [int32x4_t, int32x4_t, int32x4_t, '2', '2', ' unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, 
[4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [int64x2_t, int64x2_t, int64x2_t, '1', '1', ' unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint8x16_t, uint8x16_t, uint8x16_t, '4', '4', ' unsafe { match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 
15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint16x8_t, uint16x8_t, uint16x8_t, '3', '3', ' unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint32x4_t, uint32x4_t, uint32x4_t, '2', '2', ' unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint64x2_t, uint64x2_t, uint64x2_t, '1', '1', ' unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly8x16_t, poly8x16_t, poly8x16_t, '4', '4', ' unsafe { match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 
12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly16x8_t, poly16x8_t, poly16x8_t, '3', '3', ' unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly64x2_t, poly64x2_t, poly64x2_t, '1', '1', ' unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [float32x4_t, float32x4_t, float32x4_t, '2', '2', ' unsafe { match LANE1 & 0b11 { 0 => 
simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [float64x2_t, float64x2_t, float64x2_t, '1', '1', ' unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] + - [int8x16_t, int8x16_t, int8x16_t, '4', '4', ' match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 
as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int16x8_t, int16x8_t, int16x8_t, '3', '3', ' match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int32x4_t, int32x4_t, int32x4_t, '2', '2', ' match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int64x2_t, int64x2_t, int64x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint8x16_t, uint8x16_t, uint8x16_t, '4', '4', ' match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => 
simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint16x8_t, uint16x8_t, uint16x8_t, '3', '3', ' match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint32x4_t, uint32x4_t, uint32x4_t, '2', '2', ' match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint64x2_t, 
uint64x2_t, uint64x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly8x16_t, poly8x16_t, poly8x16_t, '4', '4', ' match LANE1 & 0b1111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32, 8, 9, 10, 11, 12, 13, 14, 15]), 8 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 16 + LANE2 as u32, 9, 10, 11, 12, 13, 14, 15]), 9 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 16 + LANE2 as u32, 10, 11, 12, 13, 14, 15]), 10 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 16 + LANE2 as u32, 11, 12, 13, 14, 15]), 11 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 16 + LANE2 as u32, 12, 13, 14, 15]), 12 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 16 + LANE2 as u32, 13, 14, 15]), 13 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 16 + LANE2 as u32, 14, 15]), 14 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 16 + LANE2 as u32, 15]), 15 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly16x8_t, poly16x8_t, poly16x8_t, '3', '3', ' match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3, 4, 5, 
6, 7]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 8 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 8 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 8 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly64x2_t, poly64x2_t, poly64x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [float32x4_t, float32x4_t, float32x4_t, '2', '2', ' match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [float64x2_t, float64x2_t, float64x2_t, '1', '1', ' match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] compose: - FnCall: [static_assert_uimm_bits!, [LANE1, '{type[3]}']] - FnCall: [static_assert_uimm_bits!, [LANE2, '{type[4]}']] - - Identifier: ["{type[5]}", Symbol] + - Identifier: ["{type[5]}", UnsafeSymbol] - name: "vcopy{neon_type[0].laneq_nox}" doc: "Insert vector element from another vector element" @@ -9023,20 +9023,20 @@ intrinsics: static_defs: ['const LANE1: i32, const LANE2: i32'] safety: safe types: - - [int8x8_t, int8x16_t, int8x8_t, '3', '4', ' let a: int8x16_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };', 'unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => 
simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [int16x4_t, int16x8_t, int16x4_t, '2', '3', ' let a: int16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };', 'unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [int32x2_t, int32x4_t, int32x2_t, '1', '2', ' let a: int32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };', 'unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint8x8_t, uint8x16_t, uint8x8_t, '3', '4', ' let a: uint8x16_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };', 'unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint16x4_t, uint16x8_t, uint16x4_t, '2', '3', ' let a: uint16x8_t = unsafe { 
simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };', 'unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint32x2_t, uint32x4_t, uint32x2_t, '1', '2', 'let a: uint32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };', 'unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly8x8_t, poly8x16_t, poly8x8_t, '3', '4', ' let a: poly8x16_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]) };', 'unsafe { match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly16x4_t, poly16x8_t, poly16x4_t, '2', '3', ' let a: poly16x8_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]) };', 'unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [float32x2_t, float32x4_t, float32x2_t, '1', '2', ' let a: float32x4_t = unsafe { simd_shuffle!(a, a, [0, 1, 2, 3]) };', 'unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + 
LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] + - [int8x8_t, int8x16_t, int8x8_t, '3', '4', ' let a: int8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int16x4_t, int16x8_t, int16x4_t, '2', '3', ' let a: int16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [int32x2_t, int32x4_t, int32x2_t, '1', '2', ' let a: int32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint8x8_t, uint8x16_t, uint8x8_t, '3', '4', ' let a: uint8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 
as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint16x4_t, uint16x8_t, uint16x4_t, '2', '3', ' let a: uint16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint32x2_t, uint32x4_t, uint32x2_t, '1', '2', 'let a: uint32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly8x8_t, poly8x16_t, poly8x8_t, '3', '4', ' let a: poly8x16_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]);', 'match LANE1 & 0b111 { 0 => simd_shuffle!(a, b, [16 + LANE2 as u32, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [0, 16 + LANE2 as u32, 2, 3, 4, 5, 6, 7]), 2 => simd_shuffle!(a, b, [0, 1, 16 + LANE2 as u32, 3, 4, 5, 6, 7]), 3 => simd_shuffle!(a, b, [0, 1, 2, 16 + LANE2 as u32, 4, 5, 6, 7]), 4 => simd_shuffle!(a, b, [0, 1, 2, 3, 16 + LANE2 as u32, 5, 6, 7]), 5 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 16 + LANE2 as u32, 6, 7]), 6 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 16 + LANE2 as u32, 7]), 7 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 16 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly16x4_t, poly16x8_t, poly16x4_t, '2', '3', ' let a: poly16x8_t = simd_shuffle!(a, a, [0, 1, 2, 3, 4, 5, 6, 7]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [8 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 8 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 8 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, 
b, [0, 1, 2, 8 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [float32x2_t, float32x4_t, float32x2_t, '1', '2', ' let a: float32x4_t = simd_shuffle!(a, a, [0, 1, 2, 3]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32]), _ => unreachable_unchecked(), }'] compose: - FnCall: [static_assert_uimm_bits!, [LANE1, '{type[3]}']] - FnCall: [static_assert_uimm_bits!, [LANE2, '{type[4]}']] - - Identifier: ["{type[5]}", Symbol] - - Identifier: ["{type[6]}", Symbol] + - Identifier: ["{type[5]}", UnsafeSymbol] + - Identifier: ["{type[6]}", UnsafeSymbol] - name: "vcopyq_lane_{neon_type[0]}" doc: "Insert vector element from another vector element" @@ -9049,15 +9049,15 @@ intrinsics: static_defs: ['const LANE1: i32, const LANE2: i32'] safety: safe types: - - [int64x2_t, int64x1_t, ' let b: int64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };', 'unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [uint64x2_t, uint64x1_t, ' let b: uint64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };', 'unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [poly64x2_t, poly64x1_t, ' let b: poly64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };', 'unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] - - [float64x2_t, float64x1_t, ' let b: float64x2_t = unsafe { simd_shuffle!(b, b, [0, 1]) };', 'unsafe { match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] + - [int64x2_t, int64x1_t, ' let b: int64x2_t = simd_shuffle!(b, b, [0, 1]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 
1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [uint64x2_t, uint64x1_t, ' let b: uint64x2_t = simd_shuffle!(b, b, [0, 1]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [poly64x2_t, poly64x1_t, ' let b: poly64x2_t = simd_shuffle!(b, b, [0, 1]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] + - [float64x2_t, float64x1_t, ' let b: float64x2_t = simd_shuffle!(b, b, [0, 1]);', 'match LANE1 & 0b1 { 0 => simd_shuffle!(a, b, [2 + LANE2 as u32, 1]), 1 => simd_shuffle!(a, b, [0, 2 + LANE2 as u32]), _ => unreachable_unchecked(), }'] compose: - FnCall: [static_assert_uimm_bits!, [LANE1, '1']] - FnCall: [static_assert!, ['LANE2 == 0']] - - Identifier: ['{type[2]}', Symbol] - - Identifier: ['{type[3]}', Symbol] + - Identifier: ['{type[2]}', UnsafeSymbol] + - Identifier: ['{type[3]}', UnsafeSymbol] - name: "vcopyq_lane_f32" doc: "Insert vector element from another vector element" @@ -9070,12 +9070,12 @@ intrinsics: static_defs: ['const LANE1: i32, const LANE2: i32'] safety: safe types: - - [float32x4_t, float32x2_t, ' let b: float32x4_t = unsafe { simd_shuffle!(b, b, [0, 1, 2, 3]) };', 'unsafe { match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => unreachable_unchecked(), } }'] + - [float32x4_t, float32x2_t, ' let b: float32x4_t = simd_shuffle!(b, b, [0, 1, 2, 3]);', 'match LANE1 & 0b11 { 0 => simd_shuffle!(a, b, [4 + LANE2 as u32, 1, 2, 3]), 1 => simd_shuffle!(a, b, [0, 4 + LANE2 as u32, 2, 3]), 2 => simd_shuffle!(a, b, [0, 1, 4 + LANE2 as u32, 3]), 3 => simd_shuffle!(a, b, [0, 1, 2, 4 + LANE2 as u32]), _ => 
unreachable_unchecked(), }'] compose: - FnCall: [static_assert_uimm_bits!, [LANE1, 2]] - FnCall: [static_assert_uimm_bits!, [LANE2, 1]] - - Identifier: ["{type[2]}", Symbol] - - Identifier: ["{type[3]}", Symbol] + - Identifier: ["{type[2]}", UnsafeSymbol] + - Identifier: ["{type[3]}", UnsafeSymbol] - name: "vcreate_f64" doc: "Insert vector element from another vector element" @@ -13366,7 +13366,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.saddlv.i32.{neon_type[0]}" arch: aarch64,arm64ec - - Identifier: ["unsafe {{ _vaddlv{neon_type[0].no}(a) as i16 }}", Symbol] + - Identifier: ["_vaddlv{neon_type[0].no}(a) as i16", UnsafeSymbol] - name: "vaddlv{neon_type[0].no}" doc: "Unsigned Add Long across Vector" @@ -13386,7 +13386,7 @@ intrinsics: links: - link: "llvm.aarch64.neon.uaddlv.i32.{neon_type[0]}" arch: aarch64,arm64ec - - Identifier: ["unsafe {{ _vaddlv{neon_type[0].no}(a) as u16 }}", Symbol] + - Identifier: ["_vaddlv{neon_type[0].no}(a) as u16", UnsafeSymbol] - name: "vmaxv{neon_type[0].no}" doc: "Horizontal vector max." 
diff --git a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml index 60fa830e5da63..7b8ddf43742d2 100644 --- a/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml +++ b/library/stdarch/crates/stdarch-gen-arm/spec/neon/arm_shared.spec.yml @@ -1729,15 +1729,15 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int8x8_t, ' static_assert_uimm_bits!(N, 3);', 'unsafe { match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), } }'] - - [int16x8_t, ' static_assert_uimm_bits!(N, 3);', 'unsafe { match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), } }'] - - [uint8x8_t, ' static_assert_uimm_bits!(N, 3);', 'unsafe { match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, 
b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), } }'] - - [uint16x8_t, ' static_assert_uimm_bits!(N, 3);', 'unsafe { match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), } }'] - - [poly8x8_t, ' static_assert_uimm_bits!(N, 3);', 'unsafe { match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), } }'] - - [poly16x8_t, ' static_assert_uimm_bits!(N, 3);', 'unsafe { match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), } }'] + - [int8x8_t, 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 
9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [int16x8_t, 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [uint8x8_t, 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [uint16x8_t, 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [poly8x8_t, 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 
8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] + - [poly16x8_t, 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] compose: - - Identifier: ["{type[1]}", Symbol] - - Identifier: ["{type[2]}", Symbol] + - FnCall: [static_assert_uimm_bits!, [N, 3]] + - Identifier: ["{type[1]}", UnsafeSymbol] - name: "vext{neon_type[0].no}" doc: "Extract vector from pair of vectors" @@ -1753,12 +1753,12 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int8x16_t, ' static_assert_uimm_bits!(N, 4);', 'unsafe { match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 
24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), } }'] - - [uint8x16_t, ' static_assert_uimm_bits!(N, 4);', 'unsafe { match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 
27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), } }'] - - [poly8x16_t, ' static_assert_uimm_bits!(N, 4);', 'unsafe { match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), } }'] + - [int8x16_t, 'match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), }'] + - [uint8x16_t, 'match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => 
simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), }'] + - [poly8x16_t, 'match N & 0b1111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]), 8 => simd_shuffle!(a, b, [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23]), 9 => simd_shuffle!(a, b, [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]), 10 => simd_shuffle!(a, b, [10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25]), 11 => simd_shuffle!(a, b, [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]), 12 => simd_shuffle!(a, b, [12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27]), 13 => simd_shuffle!(a, b, [13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28]), 14 => simd_shuffle!(a, b, [14, 15, 16, 17, 18, 
19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]), 15 => simd_shuffle!(a, b, [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]), _ => unreachable_unchecked(), }'] compose: - - Identifier: ["{type[1]}", Symbol] - - Identifier: ["{type[2]}", Symbol] + - FnCall: [static_assert_uimm_bits!, [N, 4]] + - Identifier: ["{type[1]}", UnsafeSymbol] - name: "vext{neon_type[0].no}" doc: "Extract vector from pair of vectors" @@ -1774,15 +1774,15 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int16x4_t, 'static_assert_uimm_bits!(N, 2);', 'unsafe { match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), } }'] - - [int32x4_t, ' static_assert_uimm_bits!(N, 2);', 'unsafe { match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), } }'] - - [uint16x4_t, ' static_assert_uimm_bits!(N, 2);', 'unsafe { match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), } }'] - - [uint32x4_t, ' static_assert_uimm_bits!(N, 2);', 'unsafe { match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), } }'] - - [poly16x4_t, ' static_assert_uimm_bits!(N, 2);', 'unsafe { match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), } }'] - - [float32x4_t, ' static_assert_uimm_bits!(N, 2);', 'unsafe { match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 
2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), } }'] + - [int16x4_t,'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [int32x4_t, 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [uint16x4_t, 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [uint32x4_t, 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [poly16x4_t, 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] + - [float32x4_t, 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] compose: - - Identifier: ["{type[1]}", Symbol] - - Identifier: ["{type[2]}", Symbol] + - FnCall: [static_assert_uimm_bits!, [N, 2]] + - Identifier: ["{type[1]}", UnsafeSymbol] - name: "vext{neon_type[0].no}" @@ -1801,9 +1801,10 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [float16x4_t, ' static_assert_uimm_bits!(N, 2); unsafe { match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => 
simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), } }'] + - [float16x4_t, 'match N & 0b11 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6]), _ => unreachable_unchecked(), }'] compose: - - Identifier: ["{type[1]}", Symbol] + - FnCall: [static_assert_uimm_bits!, [N, 2]] + - Identifier: ["{type[1]}", UnsafeSymbol] - name: "vext{neon_type[0].no}" doc: "Extract vector from pair of vectors" @@ -1821,9 +1822,10 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [float16x8_t, ' static_assert_uimm_bits!(N, 3); unsafe { match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), } }'] + - [float16x8_t, 'match N & 0b111 { 0 => simd_shuffle!(a, b, [0, 1, 2, 3, 4, 5, 6, 7]), 1 => simd_shuffle!(a, b, [1, 2, 3, 4, 5, 6, 7, 8]), 2 => simd_shuffle!(a, b, [2, 3, 4, 5, 6, 7, 8, 9]), 3 => simd_shuffle!(a, b, [3, 4, 5, 6, 7, 8, 9, 10]), 4 => simd_shuffle!(a, b, [4, 5, 6, 7, 8, 9, 10, 11]), 5 => simd_shuffle!(a, b, [5, 6, 7, 8, 9, 10, 11, 12]), 6 => simd_shuffle!(a, b, [6, 7, 8, 9, 10, 11, 12, 13]), 7 => simd_shuffle!(a, b, [7, 8, 9, 10, 11, 12, 13, 14]), _ => unreachable_unchecked(), }'] compose: - - Identifier: ["{type[1]}", Symbol] + - FnCall: [static_assert_uimm_bits!, [N, 3]] + - Identifier: ["{type[1]}", UnsafeSymbol] @@ -1841,12 +1843,12 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int32x2_t, ' static_assert_uimm_bits!(N, 1);', 'unsafe { match N & 0b1 { 0 => 
simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), } }'] - - [uint32x2_t, ' static_assert_uimm_bits!(N, 1);', 'unsafe { match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), } }'] - - [float32x2_t, ' static_assert_uimm_bits!(N, 1);', 'unsafe { match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), } }'] + - [int32x2_t, 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [uint32x2_t, 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [float32x2_t, 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] compose: - - Identifier: ["{type[1]}", Symbol] - - Identifier: ["{type[2]}", Symbol] + - FnCall: [static_assert_uimm_bits!, [N, 1]] + - Identifier: ["{type[1]}", UnsafeSymbol] - name: "vext{neon_type[0].no}" doc: "Extract vector from pair of vectors" @@ -1862,11 +1864,11 @@ intrinsics: static_defs: ['const N: i32'] safety: safe types: - - [int64x2_t, 'static_assert_uimm_bits!(N, 1);', 'unsafe { match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), } }'] - - [uint64x2_t, 'static_assert_uimm_bits!(N, 1);', 'unsafe { match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), } }'] + - [int64x2_t, 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] + - [uint64x2_t, 'match N & 0b1 { 0 => simd_shuffle!(a, b, [0, 1]), 1 => simd_shuffle!(a, b, [1, 2]), _ => unreachable_unchecked(), }'] compose: - - Identifier: ["{type[1]}", Symbol] - - Identifier: ["{type[2]}", Symbol] + - FnCall: [static_assert_uimm_bits!, [N, 1]] + - Identifier: 
["{type[1]}", UnsafeSymbol] - name: "vmla{neon_type[0].no}" doc: "Multiply-add to accumulator" @@ -15007,10 +15009,10 @@ intrinsics: - *neon-cfg-arm-unstable safety: safe types: - - ['vget_high_s64', 'int64x2_t', 'int64x1_t', 'vmov', 'ext', 'unsafe { int64x1_t([simd_extract!(a, 1)]) }'] - - ['vget_high_u64', 'uint64x2_t', 'uint64x1_t', 'vmov', 'ext', 'unsafe { uint64x1_t([simd_extract!(a, 1)]) }'] + - ['vget_high_s64', 'int64x2_t', 'int64x1_t', 'vmov', 'ext', 'int64x1_t([simd_extract!(a, 1)])'] + - ['vget_high_u64', 'uint64x2_t', 'uint64x1_t', 'vmov', 'ext', 'uint64x1_t([simd_extract!(a, 1)])'] compose: - - Identifier: ['{type[5]}', Symbol] + - Identifier: ['{type[5]}', UnsafeSymbol] - name: "{type[0]}" doc: "Duplicate vector element to vector or scalar" @@ -15023,10 +15025,10 @@ intrinsics: - *neon-cfg-arm-unstable safety: safe types: - - ['vget_low_s64', 'int64x2_t', 'int64x1_t', 'unsafe { int64x1_t([simd_extract!(a, 0)]) }'] - - ['vget_low_u64', 'uint64x2_t', 'uint64x1_t', 'unsafe { uint64x1_t([simd_extract!(a, 0)]) }'] + - ['vget_low_s64', 'int64x2_t', 'int64x1_t', 'int64x1_t([simd_extract!(a, 0)])'] + - ['vget_low_u64', 'uint64x2_t', 'uint64x1_t', 'uint64x1_t([simd_extract!(a, 0)])'] compose: - - Identifier: ['{type[3]}', Symbol] + - Identifier: ['{type[3]}', UnsafeSymbol] - name: "{type[0]}" doc: "Duplicate vector element to vector or scalar" From 0281120cbab34289beb0753b43fe6d3d6d6311f7 Mon Sep 17 00:00:00 2001 From: sayantn Date: Thu, 30 Apr 2026 06:54:09 +0530 Subject: [PATCH 22/30] Fix `fixupimm` --- .../crates/core_arch/src/x86/avx512f.rs | 80 +++++++++---------- 1 file changed, 40 insertions(+), 40 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/x86/avx512f.rs b/library/stdarch/crates/core_arch/src/x86/avx512f.rs index 2c5002a2d000d..94c4269c8fe51 100644 --- a/library/stdarch/crates/core_arch/src/x86/avx512f.rs +++ b/library/stdarch/crates/core_arch/src/x86/avx512f.rs @@ -42633,7 +42633,7 @@ pub fn _mm_mask3_fnmsub_round_sd( } } -/// Fix 
up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting. +/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst, and copy the upper 3 packed elements from b to the upper elements of dst. imm8 is used to set the required flags reporting. /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_ss&expand=2517) #[inline] @@ -42649,12 +42649,12 @@ pub fn _mm_fixupimm_ss(a: __m128, b: __m128, c: __m128i) -> __m let c = c.as_i32x4(); let r = vfixupimmss(a, b, c, IMM8, 0b11111111, _MM_FROUND_CUR_DIRECTION); let fixupimm: f32 = simd_extract!(r, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting. +/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from b to the upper elements of dst. imm8 is used to set the required flags reporting. 
/// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_ss&expand=2518) #[inline] @@ -42675,12 +42675,12 @@ pub fn _mm_mask_fixupimm_ss( let c = c.as_i32x4(); let fixupimm = vfixupimmss(a, b, c, IMM8, k, _MM_FROUND_CUR_DIRECTION); let fixupimm: f32 = simd_extract!(fixupimm, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting. +/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from b to the upper elements of dst. imm8 is used to set the required flags reporting. /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_ss&expand=2519) #[inline] @@ -42701,12 +42701,12 @@ pub fn _mm_maskz_fixupimm_ss( let c = c.as_i32x4(); let fixupimm = vfixupimmssz(a, b, c, IMM8, k, _MM_FROUND_CUR_DIRECTION); let fixupimm: f32 = simd_extract!(fixupimm, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting. 
+/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst, and copy the upper element from b to the upper element of dst. imm8 is used to set the required flags reporting. /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_sd&expand=2514) #[inline] @@ -42722,12 +42722,12 @@ pub fn _mm_fixupimm_sd(a: __m128d, b: __m128d, c: __m128i) -> _ let c = c.as_i64x2(); let fixupimm = vfixupimmsd(a, b, c, IMM8, 0b11111111, _MM_FROUND_CUR_DIRECTION); let fixupimm: f64 = simd_extract!(fixupimm, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting. +/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from b to the upper element of dst. imm8 is used to set the required flags reporting. 
/// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_sd&expand=2515) #[inline] @@ -42748,12 +42748,12 @@ pub fn _mm_mask_fixupimm_sd( let c = c.as_i64x2(); let fixupimm = vfixupimmsd(a, b, c, IMM8, k, _MM_FROUND_CUR_DIRECTION); let fixupimm: f64 = simd_extract!(fixupimm, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting. +/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from b to the upper element of dst. imm8 is used to set the required flags reporting. /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_sd&expand=2516) #[inline] @@ -42774,12 +42774,12 @@ pub fn _mm_maskz_fixupimm_sd( let c = c.as_i64x2(); let fixupimm = vfixupimmsdz(a, b, c, IMM8, k, _MM_FROUND_CUR_DIRECTION); let fixupimm: f64 = simd_extract!(fixupimm, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst, and copy the upper 3 packed elements from a to the upper elements of dst. 
imm8 is used to set the required flags reporting.\ +/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst, and copy the upper 3 packed elements from b to the upper elements of dst. imm8 is used to set the required flags reporting.\ /// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_round_ss&expand=2511) @@ -42801,12 +42801,12 @@ pub fn _mm_fixupimm_round_ss( let c = c.as_i32x4(); let r = vfixupimmss(a, b, c, IMM8, 0b11111111, SAE); let fixupimm: f32 = simd_extract!(r, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting.\ +/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper 3 packed elements from b to the upper elements of dst. imm8 is used to set the required flags reporting.\ /// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. 
/// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_round_ss&expand=2512) @@ -42829,12 +42829,12 @@ pub fn _mm_mask_fixupimm_round_ss( let c = c.as_i32x4(); let r = vfixupimmss(a, b, c, IMM8, k, SAE); let fixupimm: f32 = simd_extract!(r, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from a to the upper elements of dst. imm8 is used to set the required flags reporting.\ +/// Fix up the lower single-precision (32-bit) floating-point elements in a and b using the lower 32-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper 3 packed elements from b to the upper elements of dst. imm8 is used to set the required flags reporting.\ /// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_round_ss&expand=2513) @@ -42857,12 +42857,12 @@ pub fn _mm_maskz_fixupimm_round_ss( let c = c.as_i32x4(); let r = vfixupimmssz(a, b, c, IMM8, k, SAE); let fixupimm: f32 = simd_extract!(r, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst, and copy the upper element from a to the upper element of dst. 
imm8 is used to set the required flags reporting.\ +/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst, and copy the upper element from b to the upper element of dst. imm8 is used to set the required flags reporting.\ /// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. /// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_fixupimm_round_sd&expand=2508) @@ -42884,12 +42884,12 @@ pub fn _mm_fixupimm_round_sd( let c = c.as_i64x2(); let r = vfixupimmsd(a, b, c, IMM8, 0b11111111, SAE); let fixupimm: f64 = simd_extract!(r, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting.\ +/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using writemask k (the element is copied from a when mask bit 0 is not set), and copy the upper element from b to the upper element of dst. imm8 is used to set the required flags reporting.\ /// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. 
/// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_mask_fixupimm_round_sd&expand=2509) @@ -42912,12 +42912,12 @@ pub fn _mm_mask_fixupimm_round_sd( let c = c.as_i64x2(); let r = vfixupimmsd(a, b, c, IMM8, k, SAE); let fixupimm: f64 = simd_extract!(r, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } -/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from a to the upper element of dst. imm8 is used to set the required flags reporting.\ +/// Fix up the lower double-precision (64-bit) floating-point elements in a and b using the lower 64-bit integer in c, store the result in the lower element of dst using zeromask k (the element is zeroed out when mask bit 0 is not set), and copy the upper element from b to the upper element of dst. imm8 is used to set the required flags reporting.\ /// Exceptions can be suppressed by passing _MM_FROUND_NO_EXC in the sae parameter. 
/// /// [Intel's documentation](https://www.intel.com/content/www/us/en/docs/intrinsics-guide/index.html#text=mm_maskz_fixupimm_round_sd&expand=2510) @@ -42940,7 +42940,7 @@ pub fn _mm_maskz_fixupimm_round_sd( let c = c.as_i64x2(); let r = vfixupimmsdz(a, b, c, IMM8, k, SAE); let fixupimm: f64 = simd_extract!(r, 0); - let r = simd_insert!(a, 0, fixupimm); + let r = simd_insert!(b, 0, fixupimm); transmute(r) } } @@ -61830,7 +61830,7 @@ mod tests { let b = _mm_set1_ps(f32::MAX); let c = _mm_set1_epi32(i32::MAX); let r = _mm_fixupimm_ss::<5>(a, b, c); - let e = _mm_set_ps(0., 0., 0., -0.0); + let e = _mm_set_ps(f32::MAX, f32::MAX, f32::MAX, -0.0); assert_eq_m128(r, e); } @@ -61840,7 +61840,7 @@ mod tests { let b = _mm_set1_ps(f32::MAX); let c = _mm_set1_epi32(i32::MAX); let r = _mm_mask_fixupimm_ss::<5>(a, 0b11111111, b, c); - let e = _mm_set_ps(0., 0., 0., -0.0); + let e = _mm_set_ps(f32::MAX, f32::MAX, f32::MAX, -0.0); assert_eq_m128(r, e); } @@ -61850,10 +61850,10 @@ mod tests { let b = _mm_set1_ps(f32::MAX); let c = _mm_set1_epi32(i32::MAX); let r = _mm_maskz_fixupimm_ss::<5>(0b00000000, a, b, c); - let e = _mm_set_ps(0., 0., 0., 0.0); + let e = _mm_set_ps(f32::MAX, f32::MAX, f32::MAX, 0.0); assert_eq_m128(r, e); let r = _mm_maskz_fixupimm_ss::<5>(0b11111111, a, b, c); - let e = _mm_set_ps(0., 0., 0., -0.0); + let e = _mm_set_ps(f32::MAX, f32::MAX, f32::MAX, -0.0); assert_eq_m128(r, e); } @@ -61863,7 +61863,7 @@ mod tests { let b = _mm_set1_pd(f64::MAX); let c = _mm_set1_epi64x(i32::MAX as i64); let r = _mm_fixupimm_sd::<5>(a, b, c); - let e = _mm_set_pd(0., -0.0); + let e = _mm_set_pd(f64::MAX, -0.0); assert_eq_m128d(r, e); } @@ -61873,7 +61873,7 @@ mod tests { let b = _mm_set1_pd(f64::MAX); let c = _mm_set1_epi64x(i32::MAX as i64); let r = _mm_mask_fixupimm_sd::<5>(a, 0b11111111, b, c); - let e = _mm_set_pd(0., -0.0); + let e = _mm_set_pd(f64::MAX, -0.0); assert_eq_m128d(r, e); } @@ -61883,10 +61883,10 @@ mod tests { let b = _mm_set1_pd(f64::MAX); let c = 
_mm_set1_epi64x(i32::MAX as i64); let r = _mm_maskz_fixupimm_sd::<5>(0b00000000, a, b, c); - let e = _mm_set_pd(0., 0.0); + let e = _mm_set_pd(f64::MAX, 0.0); assert_eq_m128d(r, e); let r = _mm_maskz_fixupimm_sd::<5>(0b11111111, a, b, c); - let e = _mm_set_pd(0., -0.0); + let e = _mm_set_pd(f64::MAX, -0.0); assert_eq_m128d(r, e); } @@ -61896,7 +61896,7 @@ mod tests { let b = _mm_set1_ps(f32::MAX); let c = _mm_set1_epi32(i32::MAX); let r = _mm_fixupimm_round_ss::<5, _MM_FROUND_CUR_DIRECTION>(a, b, c); - let e = _mm_set_ps(1., 0., 0., -0.0); + let e = _mm_set_ps(f32::MAX, f32::MAX, f32::MAX, -0.0); assert_eq_m128(r, e); } @@ -61906,7 +61906,7 @@ mod tests { let b = _mm_set1_ps(f32::MAX); let c = _mm_set1_epi32(i32::MAX); let r = _mm_mask_fixupimm_round_ss::<5, _MM_FROUND_CUR_DIRECTION>(a, 0b11111111, b, c); - let e = _mm_set_ps(0., 0., 0., -0.0); + let e = _mm_set_ps(f32::MAX, f32::MAX, f32::MAX, -0.0); assert_eq_m128(r, e); } @@ -61916,10 +61916,10 @@ mod tests { let b = _mm_set1_ps(f32::MAX); let c = _mm_set1_epi32(i32::MAX); let r = _mm_maskz_fixupimm_round_ss::<5, _MM_FROUND_CUR_DIRECTION>(0b00000000, a, b, c); - let e = _mm_set_ps(0., 0., 0., 0.0); + let e = _mm_set_ps(f32::MAX, f32::MAX, f32::MAX, 0.0); assert_eq_m128(r, e); let r = _mm_maskz_fixupimm_round_ss::<5, _MM_FROUND_CUR_DIRECTION>(0b11111111, a, b, c); - let e = _mm_set_ps(0., 0., 0., -0.0); + let e = _mm_set_ps(f32::MAX, f32::MAX, f32::MAX, -0.0); assert_eq_m128(r, e); } @@ -61929,7 +61929,7 @@ mod tests { let b = _mm_set1_pd(f64::MAX); let c = _mm_set1_epi64x(i32::MAX as i64); let r = _mm_fixupimm_round_sd::<5, _MM_FROUND_CUR_DIRECTION>(a, b, c); - let e = _mm_set_pd(0., -0.0); + let e = _mm_set_pd(f64::MAX, -0.0); assert_eq_m128d(r, e); } @@ -61939,7 +61939,7 @@ mod tests { let b = _mm_set1_pd(f64::MAX); let c = _mm_set1_epi64x(i32::MAX as i64); let r = _mm_mask_fixupimm_round_sd::<5, _MM_FROUND_CUR_DIRECTION>(a, 0b11111111, b, c); - let e = _mm_set_pd(0., -0.0); + let e = _mm_set_pd(f64::MAX, 
-0.0); assert_eq_m128d(r, e); } @@ -61949,10 +61949,10 @@ mod tests { let b = _mm_set1_pd(f64::MAX); let c = _mm_set1_epi64x(i32::MAX as i64); let r = _mm_maskz_fixupimm_round_sd::<5, _MM_FROUND_CUR_DIRECTION>(0b00000000, a, b, c); - let e = _mm_set_pd(0., 0.0); + let e = _mm_set_pd(f64::MAX, 0.0); assert_eq_m128d(r, e); let r = _mm_maskz_fixupimm_round_sd::<5, _MM_FROUND_CUR_DIRECTION>(0b11111111, a, b, c); - let e = _mm_set_pd(0., -0.0); + let e = _mm_set_pd(f64::MAX, -0.0); assert_eq_m128d(r, e); } From 753900705eb8449868f67cd96b5112e963421775 Mon Sep 17 00:00:00 2001 From: The rustc-josh-sync Cronjob Bot Date: Mon, 4 May 2026 05:12:53 +0000 Subject: [PATCH 23/30] Prepare for merging from rust-lang/rust This updates the rust-version file to 045b17737dab5fcc28e4cbee0cfe2ce4ed363b32. --- library/stdarch/rust-version | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/library/stdarch/rust-version b/library/stdarch/rust-version index e9fc6c4cd023e..59e9e5a0e6ee9 100644 --- a/library/stdarch/rust-version +++ b/library/stdarch/rust-version @@ -1 +1 @@ -e22c616e4e87914135c1db261a03e0437255335e +045b17737dab5fcc28e4cbee0cfe2ce4ed363b32 From a8d85e63681223aea0f8013307b8a4de2fe929b9 Mon Sep 17 00:00:00 2001 From: Alyssa Haroldsen Date: Tue, 5 May 2026 13:31:05 -0700 Subject: [PATCH 24/30] tests/ui: allow spaces in hashbrown src normalization If one's home directory contains a space, the default location for the hashbrown source location also contains a space, and so the UI test normalization in issue-21763 fails to normalize as expected. While this new regex does not handle all valid paths, such as those beginning with `\\?\` or `\\name\`, this handles most absolute UNIX and Windows paths. Relative paths don't seem to be applicable. 
--- tests/ui/issues/issue-21763.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/ui/issues/issue-21763.rs b/tests/ui/issues/issue-21763.rs index c1ed5d94f9b55..5f236a25e9944 100644 --- a/tests/ui/issues/issue-21763.rs +++ b/tests/ui/issues/issue-21763.rs @@ -1,6 +1,6 @@ // Regression test for HashMap only impl'ing Send/Sync if its contents do -//@ normalize-stderr: "\S+[\\/]hashbrown\S+" -> "$$HASHBROWN_SRC_LOCATION" +//@ normalize-stderr: "(?:[A-Za-z]:[/\\]|/).*[\\/]hashbrown\S+" -> "$$HASHBROWN_SRC_LOCATION" use std::collections::HashMap; use std::rc::Rc; From facd03622bf470da11a98316a590111ad4522ff1 Mon Sep 17 00:00:00 2001 From: mejrs <59372212+mejrs@users.noreply.github.com> Date: Wed, 6 May 2026 14:39:39 +0200 Subject: [PATCH 25/30] Don't return dummy MacroData in `get_macro` --- .../rustc_resolve/src/build_reduced_graph.rs | 11 +++--- compiler/rustc_resolve/src/lib.rs | 11 +++--- compiler/rustc_resolve/src/macros.rs | 4 +- tests/ui/cfg/reserved-macro-names-rename.rs | 29 ++++++++++++++ .../ui/cfg/reserved-macro-names-rename.stderr | 39 +++++++++++++++++++ 5 files changed, 81 insertions(+), 13 deletions(-) create mode 100644 tests/ui/cfg/reserved-macro-names-rename.rs create mode 100644 tests/ui/cfg/reserved-macro-names-rename.stderr diff --git a/compiler/rustc_resolve/src/build_reduced_graph.rs b/compiler/rustc_resolve/src/build_reduced_graph.rs index 4c307e8a6a3d7..15eecf4604c17 100644 --- a/compiler/rustc_resolve/src/build_reduced_graph.rs +++ b/compiler/rustc_resolve/src/build_reduced_graph.rs @@ -37,8 +37,8 @@ use crate::ref_mut::CmCell; use crate::{ BindingKey, Decl, DeclData, DeclKind, DelayedVisResolutionError, ExternModule, ExternPreludeEntry, Finalize, IdentKey, LocalModule, MacroData, Module, ModuleKind, - ModuleOrUniformRoot, ParentScope, PathResult, Res, Resolver, Segment, Used, VisResolutionError, - errors, + ModuleOrUniformRoot, ParentScope, PathResult, Res, Resolver, Segment, SyntaxExtension, Used, + 
VisResolutionError, errors, }; impl<'ra, 'tcx> Resolver<'ra, 'tcx> { @@ -201,10 +201,11 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } } - pub(crate) fn get_macro(&self, res: Res) -> Option<&'ra MacroData> { + /// Gets the `SyntaxExtension` corresponding to `res`. + pub(crate) fn get_macro(&self, res: Res) -> Option<&Arc> { match res { - Res::Def(DefKind::Macro(..), def_id) => Some(self.get_macro_by_def_id(def_id)), - Res::NonMacroAttr(_) => Some(self.non_macro_attr), + Res::Def(DefKind::Macro(..), def_id) => Some(&self.get_macro_by_def_id(def_id).ext), + Res::NonMacroAttr(_) => Some(&self.non_macro_attr), _ => None, } } diff --git a/compiler/rustc_resolve/src/lib.rs b/compiler/rustc_resolve/src/lib.rs index b15e4b1b72774..d0c30785c744c 100644 --- a/compiler/rustc_resolve/src/lib.rs +++ b/compiler/rustc_resolve/src/lib.rs @@ -1392,7 +1392,7 @@ pub struct Resolver<'ra, 'tcx> { extern_macro_map: CacheRefCell>, dummy_ext_bang: Arc, dummy_ext_derive: Arc, - non_macro_attr: &'ra MacroData, + non_macro_attr: Arc, local_macro_def_scopes: FxHashMap> = default::fx_hash_map(), ast_transform_scopes: FxHashMap> = default::fx_hash_map(), unused_macros: FxIndexMap, @@ -1812,8 +1812,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { extern_macro_map: Default::default(), dummy_ext_bang: Arc::new(SyntaxExtension::dummy_bang(edition)), dummy_ext_derive: Arc::new(SyntaxExtension::dummy_derive(edition)), - non_macro_attr: arenas - .alloc_macro(MacroData::new(Arc::new(SyntaxExtension::non_macro_attr(edition)))), + non_macro_attr: Arc::new(SyntaxExtension::non_macro_attr(edition)), unused_macros: Default::default(), unused_macro_rules: Default::default(), single_segment_macro_resolutions: Default::default(), @@ -1984,7 +1983,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { match macro_kind { MacroKind::Bang => Arc::clone(&self.dummy_ext_bang), MacroKind::Derive => Arc::clone(&self.dummy_ext_derive), - MacroKind::Attr => Arc::clone(&self.non_macro_attr.ext), + MacroKind::Attr => 
Arc::clone(&self.non_macro_attr), } } @@ -2013,11 +2012,11 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } fn is_builtin_macro(&self, res: Res) -> bool { - self.get_macro(res).is_some_and(|macro_data| macro_data.ext.builtin_name.is_some()) + self.get_macro(res).is_some_and(|ext| ext.builtin_name.is_some()) } fn is_specific_builtin_macro(&self, res: Res, symbol: Symbol) -> bool { - self.get_macro(res).is_some_and(|macro_data| macro_data.ext.builtin_name == Some(symbol)) + self.get_macro(res).is_some_and(|ext| ext.builtin_name == Some(symbol)) } fn macro_def(&self, mut ctxt: SyntaxContext) -> DefId { diff --git a/compiler/rustc_resolve/src/macros.rs b/compiler/rustc_resolve/src/macros.rs index dd9f500ff88d0..2cfe20e25a27f 100644 --- a/compiler/rustc_resolve/src/macros.rs +++ b/compiler/rustc_resolve/src/macros.rs @@ -882,7 +882,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { } _ => None, }, - None => self.get_macro(res).map(|macro_data| Arc::clone(¯o_data.ext)), + None => self.get_macro(res).map(Arc::clone), }; Ok((ext, res)) } @@ -1212,7 +1212,7 @@ impl<'ra, 'tcx> Resolver<'ra, 'tcx> { // Reserve some names that are not quite covered by the general check // performed on `Resolver::builtin_attrs`. 
if name == sym::cfg || name == sym::cfg_attr { - let macro_kinds = self.get_macro(res).map(|macro_data| macro_data.ext.macro_kinds()); + let macro_kinds = res.macro_kinds(); if macro_kinds.is_some() && sub_namespace_match(macro_kinds, Some(MacroKind::Attr)) { self.dcx().emit_err(errors::NameReservedInAttributeNamespace { span, ident: name }); } diff --git a/tests/ui/cfg/reserved-macro-names-rename.rs b/tests/ui/cfg/reserved-macro-names-rename.rs new file mode 100644 index 0000000000000..048758407a03b --- /dev/null +++ b/tests/ui/cfg/reserved-macro-names-rename.rs @@ -0,0 +1,29 @@ +//@edition:2018 + +#![crate_type = "lib"] + + +mod a { + use ignore as cfg; + //~^ERROR name `cfg` is reserved in attribute namespace +} + +mod b { + use cfg_attr as cfg; + //~^ERROR name `cfg` is reserved in attribute namespace +} + +mod c { + use cfg as cfg; + //~^ERROR `cfg` is ambiguous +} + +mod d { + use inline as cfg_attr; + //~^ERROR name `cfg_attr` is reserved in attribute namespace +} + +mod e { + use not_found as cfg; // trigger "unresolved import", not "cfg reserved". + //~^ ERROR unresolved import `not_found` +} diff --git a/tests/ui/cfg/reserved-macro-names-rename.stderr b/tests/ui/cfg/reserved-macro-names-rename.stderr new file mode 100644 index 0000000000000..d6327912a581f --- /dev/null +++ b/tests/ui/cfg/reserved-macro-names-rename.stderr @@ -0,0 +1,39 @@ +error: name `cfg` is reserved in attribute namespace + --> $DIR/reserved-macro-names-rename.rs:7:19 + | +LL | use ignore as cfg; + | ^^^ + +error: name `cfg` is reserved in attribute namespace + --> $DIR/reserved-macro-names-rename.rs:12:21 + | +LL | use cfg_attr as cfg; + | ^^^ + +error: name `cfg_attr` is reserved in attribute namespace + --> $DIR/reserved-macro-names-rename.rs:22:19 + | +LL | use inline as cfg_attr; + | ^^^^^^^^ + +error[E0432]: unresolved import `not_found` + --> $DIR/reserved-macro-names-rename.rs:27:9 + | +LL | use not_found as cfg; // trigger "unresolved import", not "cfg reserved". 
+ | ^^^^^^^^^^^^^^^^ no external crate `not_found` + +error[E0659]: `cfg` is ambiguous + --> $DIR/reserved-macro-names-rename.rs:17:9 + | +LL | use cfg as cfg; + | ^^^ ambiguous name + | + = note: ambiguous because of a name conflict with a builtin attribute + = note: `cfg` could refer to a built-in attribute +note: `cfg` could also refer to a macro from prelude + --> $SRC_DIR/std/src/prelude/mod.rs:LL:COL + +error: aborting due to 5 previous errors + +Some errors have detailed explanations: E0432, E0659. +For more information about an error, try `rustc --explain E0432`. From 1d2b6ed86b89ab4c0db3015d51366b19fc2d1110 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Wed, 6 May 2026 16:27:55 +0100 Subject: [PATCH 26/30] Remove #[inline(always)] from Arm intrinsic generator & re-generate intrinsics --- .../core_arch/src/aarch64/neon/generated.rs | 3870 ++++----- .../core_arch/src/aarch64/sve/generated.rs | 7328 ++++++++--------- .../core_arch/src/aarch64/sve2/generated.rs | 3774 ++++----- .../src/arm_shared/neon/generated.rs | 6420 +++++++-------- .../crates/stdarch-gen-arm/src/intrinsic.rs | 2 +- 5 files changed, 10697 insertions(+), 10697 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs index fd7a04146bac0..898ccfc9edeae 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/neon/generated.rs @@ -14,7 +14,7 @@ use super::*; #[doc = "CRC32-C single round checksum for quad words (64 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg_attr(test, assert_instr(crc32cx))] #[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")] @@ -30,7 +30,7 @@ pub fn __crc32cd(crc: u32, data: u64) -> u32 { } #[doc = "CRC32 single round checksum for quad words (64 
bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg_attr(test, assert_instr(crc32x))] #[stable(feature = "stdarch_aarch64_crc32", since = "1.80.0")] @@ -46,7 +46,7 @@ pub fn __crc32d(crc: u32, data: u64) -> u32 { } #[doc = "Floating-point JavaScript convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__jcvt)"] -#[inline(always)] +#[inline] #[target_feature(enable = "jsconv")] #[cfg_attr(test, assert_instr(fjcvtzs))] #[stable(feature = "stdarch_aarch64_jscvt", since = "1.95.0")] @@ -62,7 +62,7 @@ pub fn __jcvt(a: f64) -> i32 { } #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))] @@ -77,7 +77,7 @@ pub fn vabal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { } #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))] @@ -92,7 +92,7 @@ pub fn vabal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { } #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sabal2))] @@ -107,7 +107,7 @@ pub fn vabal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { } #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))] @@ -121,7 +121,7 @@ pub fn vabal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t } #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))] @@ -135,7 +135,7 @@ pub fn vabal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t } #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uabal2))] @@ -149,7 +149,7 @@ pub fn vabal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t } #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] @@ -165,7 +165,7 @@ pub fn vabd_f64(a: float64x1_t, b: 
float64x1_t) -> float64x1_t { } #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] @@ -181,7 +181,7 @@ pub fn vabdq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] @@ -190,7 +190,7 @@ pub fn vabdd_f64(a: f64, b: f64) -> f64 { } #[doc = "Floating-point absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabds_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fabd))] @@ -199,7 +199,7 @@ pub fn vabds_f32(a: f32, b: f32) -> f32 { } #[doc = "Floating-point absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -209,7 +209,7 @@ pub fn vabdh_f16(a: f16, b: f16) -> f16 { } #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sabdl2))] @@ -223,7 +223,7 @@ pub fn vabdl_high_s16(a: int16x8_t, b: 
int16x8_t) -> int32x4_t { } #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sabdl2))] @@ -237,7 +237,7 @@ pub fn vabdl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { } #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sabdl2))] @@ -251,7 +251,7 @@ pub fn vabdl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { } #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabdl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -264,7 +264,7 @@ pub fn vabdl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { } #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabdl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -277,7 +277,7 @@ pub fn vabdl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { } #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uabdl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ 
-290,7 +290,7 @@ pub fn vabdl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { } #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fabs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -299,7 +299,7 @@ pub fn vabs_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fabs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -308,7 +308,7 @@ pub fn vabsq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Absolute Value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] @@ -321,7 +321,7 @@ pub fn vabs_s64(a: int64x1_t) -> int64x1_t { } #[doc = "Absolute Value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] @@ -334,7 +334,7 @@ pub fn vabsq_s64(a: int64x2_t) -> int64x2_t { } #[doc = "Absolute Value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(abs))] @@ -350,7 +350,7 @@ pub fn vabsd_s64(a: i64) -> i64 { } #[doc = "Add"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -359,7 +359,7 @@ pub fn vaddd_s64(a: i64, b: i64) -> i64 { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -368,7 +368,7 @@ pub fn vaddd_u64(a: u64, b: u64) -> u64 { } #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -384,7 +384,7 @@ pub fn vaddlv_s16(a: int16x4_t) -> i32 { } #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -400,7 +400,7 @@ pub fn vaddlvq_s16(a: int16x8_t) -> i32 { } #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -416,7 +416,7 @@ pub fn vaddlvq_s32(a: int32x4_t) -> i64 { } #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s32)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlp))] @@ -432,7 +432,7 @@ pub fn vaddlv_s32(a: int32x2_t) -> i64 { } #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -448,7 +448,7 @@ pub fn vaddlv_s8(a: int8x8_t) -> i16 { } #[doc = "Signed Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(saddlv))] @@ -464,7 +464,7 @@ pub fn vaddlvq_s8(a: int8x16_t) -> i16 { } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -480,7 +480,7 @@ pub fn vaddlv_u16(a: uint16x4_t) -> u32 { } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -496,7 +496,7 @@ pub fn vaddlvq_u16(a: uint16x8_t) -> u32 { } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -512,7 +512,7 @@ pub fn vaddlvq_u32(a: uint32x4_t) -> u64 { } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlp))] @@ -528,7 +528,7 @@ pub fn vaddlv_u32(a: uint32x2_t) -> u64 { } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlv_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -544,7 +544,7 @@ pub fn vaddlv_u8(a: uint8x8_t) -> u16 { } #[doc = "Unsigned Add Long across Vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddlvq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uaddlv))] @@ -560,7 +560,7 @@ pub fn vaddlvq_u8(a: uint8x16_t) -> u16 { } #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] @@ -576,7 +576,7 @@ pub fn vaddv_f32(a: float32x2_t) -> f32 { } #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] @@ -592,7 +592,7 @@ pub fn vaddvq_f32(a: 
float32x4_t) -> f32 { } #[doc = "Floating-point add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] @@ -608,7 +608,7 @@ pub fn vaddvq_f64(a: float64x2_t) -> f64 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -617,7 +617,7 @@ pub fn vaddv_s32(a: int32x2_t) -> i32 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -626,7 +626,7 @@ pub fn vaddv_s8(a: int8x8_t) -> i8 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -635,7 +635,7 @@ pub fn vaddvq_s8(a: int8x16_t) -> i8 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -644,7 +644,7 @@ pub fn vaddv_s16(a: int16x4_t) -> i16 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s16)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -653,7 +653,7 @@ pub fn vaddvq_s16(a: int16x8_t) -> i16 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -662,7 +662,7 @@ pub fn vaddvq_s32(a: int32x4_t) -> i32 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -671,7 +671,7 @@ pub fn vaddv_u32(a: uint32x2_t) -> u32 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -680,7 +680,7 @@ pub fn vaddv_u8(a: uint8x8_t) -> u8 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -689,7 +689,7 @@ pub fn vaddvq_u8(a: uint8x16_t) -> u8 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddv_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -698,7 +698,7 @@ pub 
fn vaddv_u16(a: uint16x4_t) -> u16 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -707,7 +707,7 @@ pub fn vaddvq_u16(a: uint16x8_t) -> u16 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addv))] @@ -716,7 +716,7 @@ pub fn vaddvq_u32(a: uint32x4_t) -> u32 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -725,7 +725,7 @@ pub fn vaddvq_s64(a: int64x2_t) -> i64 { } #[doc = "Add across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddvq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -734,7 +734,7 @@ pub fn vaddvq_u64(a: uint64x2_t) -> u64 { } #[doc = "Multi-vector floating-point absolute maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))] #[unstable(feature = "faminmax", issue = "137933")] @@ -750,7 +750,7 @@ pub fn vamax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Multi-vector floating-point absolute 
maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))] #[unstable(feature = "faminmax", issue = "137933")] @@ -766,7 +766,7 @@ pub fn vamaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Multi-vector floating-point absolute maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamax_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))] #[unstable(feature = "faminmax", issue = "137933")] @@ -782,7 +782,7 @@ pub fn vamax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Multi-vector floating-point absolute maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))] #[unstable(feature = "faminmax", issue = "137933")] @@ -798,7 +798,7 @@ pub fn vamaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Multi-vector floating-point absolute maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamaxq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famax))] #[unstable(feature = "faminmax", issue = "137933")] @@ -814,7 +814,7 @@ pub fn vamaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Multi-vector floating-point absolute minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))] #[unstable(feature = "faminmax", issue = "137933")] @@ -830,7 +830,7 @@ pub fn vamin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Multi-vector floating-point absolute minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))] #[unstable(feature = "faminmax", issue = "137933")] @@ -846,7 +846,7 @@ pub fn vaminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Multi-vector floating-point absolute minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vamin_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))] #[unstable(feature = "faminmax", issue = "137933")] @@ -862,7 +862,7 @@ pub fn vamin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Multi-vector floating-point absolute minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))] #[unstable(feature = "faminmax", issue = "137933")] @@ -878,7 +878,7 @@ pub fn vaminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Multi-vector floating-point absolute minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaminq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,faminmax")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(famin))] #[unstable(feature = "faminmax", issue = "137933")] @@ -894,7 +894,7 @@ pub fn vaminq_f64(a: 
float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -910,7 +910,7 @@ pub fn vbcaxq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -926,7 +926,7 @@ pub fn vbcaxq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -942,7 +942,7 @@ pub fn vbcaxq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -958,7 +958,7 @@ pub fn vbcaxq_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] 
#[cfg_attr(test, assert_instr(bcax))] @@ -974,7 +974,7 @@ pub fn vbcaxq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -990,7 +990,7 @@ pub fn vbcaxq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -1006,7 +1006,7 @@ pub fn vbcaxq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { } #[doc = "Bit clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbcaxq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(bcax))] @@ -1022,7 +1022,7 @@ pub fn vbcaxq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -1040,7 +1040,7 @@ pub fn vcadd_rot270_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -1058,7 +1058,7 @@ pub fn vcaddq_rot270_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot270_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1074,7 +1074,7 @@ pub fn vcadd_rot270_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1090,7 +1090,7 @@ pub fn vcaddq_rot270_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot270_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1106,7 +1106,7 @@ pub fn vcaddq_rot270_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = 
"stdarch_neon_fcma", issue = "117222")] @@ -1124,7 +1124,7 @@ pub fn vcadd_rot90_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fcma"))] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -1142,7 +1142,7 @@ pub fn vcaddq_rot90_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcadd_rot90_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1158,7 +1158,7 @@ pub fn vcadd_rot90_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1174,7 +1174,7 @@ pub fn vcaddq_rot90_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Floating-point complex add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaddq_rot90_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcadd))] @@ -1190,7 +1190,7 @@ pub fn vcaddq_rot90_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1206,7 +1206,7 @@ pub fn vcage_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1222,7 +1222,7 @@ pub fn vcageq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaged_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1238,7 +1238,7 @@ pub fn vcaged_f64(a: f64, b: f64) -> u64 { } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcages_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1254,7 +1254,7 @@ pub fn vcages_f32(a: f32, b: f32) -> u32 { } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(facge))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -1271,7 +1271,7 @@ pub fn vcageh_f16(a: f16, b: f16) -> u16 { 
} #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1287,7 +1287,7 @@ pub fn vcagt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1303,7 +1303,7 @@ pub fn vcagtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1319,7 +1319,7 @@ pub fn vcagtd_f64(a: f64, b: f64) -> u64 { } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagts_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1335,7 +1335,7 @@ pub fn vcagts_f32(a: f32, b: f32) -> u32 { } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagth_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(facgt))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -1352,7 +1352,7 @@ pub 
fn vcagth_f16(a: f16, b: f16) -> u16 { } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1361,7 +1361,7 @@ pub fn vcale_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1370,7 +1370,7 @@ pub fn vcaleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaled_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1379,7 +1379,7 @@ pub fn vcaled_f64(a: f64, b: f64) -> u64 { } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcales_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1388,7 +1388,7 @@ pub fn vcales_f32(a: f32, b: f32) -> u32 { } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(facge))] #[target_feature(enable = "neon,fp16")] #[unstable(feature 
= "stdarch_neon_f16", issue = "136306")] @@ -1398,7 +1398,7 @@ pub fn vcaleh_f16(a: f16, b: f16) -> u16 { } #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1407,7 +1407,7 @@ pub fn vcalt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1416,7 +1416,7 @@ pub fn vcaltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1425,7 +1425,7 @@ pub fn vcaltd_f64(a: f64, b: f64) -> u64 { } #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalts_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(facgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1434,7 +1434,7 @@ pub fn vcalts_f32(a: f32, b: f32) -> u32 { } #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalth_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(facgt))] #[target_feature(enable = 
"neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -1444,7 +1444,7 @@ pub fn vcalth_f16(a: f16, b: f16) -> u16 { } #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1453,7 +1453,7 @@ pub fn vceq_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1462,7 +1462,7 @@ pub fn vceqq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1471,7 +1471,7 @@ pub fn vceq_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1480,7 +1480,7 @@ pub fn vceqq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1489,7 +1489,7 @@ pub fn vceq_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1498,7 +1498,7 @@ pub fn vceqq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1507,7 +1507,7 @@ pub fn vceq_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1516,7 +1516,7 @@ pub fn vceqq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { } #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1525,7 +1525,7 @@ pub fn vceqd_f64(a: f64, b: f64) -> u64 { } #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] @@ -1534,7 +1534,7 @@ pub fn vceqs_f32(a: f32, b: f32) -> u32 { } #[doc = "Compare bitwise equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1543,7 +1543,7 @@ pub fn vceqd_s64(a: i64, b: i64) -> u64 { } #[doc = "Compare bitwise equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1552,7 +1552,7 @@ pub fn vceqd_u64(a: u64, b: u64) -> u64 { } #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -1562,7 +1562,7 @@ pub fn vceqh_f16(a: f16, b: f16) -> u16 { } #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmeq))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -1573,7 +1573,7 @@ pub fn vceqz_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmeq))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -1584,7 +1584,7 @@ 
pub fn vceqzq_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1594,7 +1594,7 @@ pub fn vceqz_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1604,7 +1604,7 @@ pub fn vceqzq_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1614,7 +1614,7 @@ pub fn vceqz_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1624,7 +1624,7 @@ pub fn vceqzq_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1634,7 
+1634,7 @@ pub fn vceqz_s8(a: int8x8_t) -> uint8x8_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1644,7 +1644,7 @@ pub fn vceqzq_s8(a: int8x16_t) -> uint8x16_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1654,7 +1654,7 @@ pub fn vceqz_s16(a: int16x4_t) -> uint16x4_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1664,7 +1664,7 @@ pub fn vceqzq_s16(a: int16x8_t) -> uint16x8_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1674,7 +1674,7 @@ pub fn vceqz_s32(a: int32x2_t) -> uint32x2_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1684,7 +1684,7 @@ pub fn vceqzq_s32(a: int32x4_t) 
-> uint32x4_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1694,7 +1694,7 @@ pub fn vceqz_s64(a: int64x1_t) -> uint64x1_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1704,7 +1704,7 @@ pub fn vceqzq_s64(a: int64x2_t) -> uint64x2_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1714,7 +1714,7 @@ pub fn vceqz_p8(a: poly8x8_t) -> uint8x8_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1724,7 +1724,7 @@ pub fn vceqzq_p8(a: poly8x16_t) -> uint8x16_t { } #[doc = "Signed compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1734,7 +1734,7 @@ pub fn vceqz_p64(a: poly64x1_t) -> uint64x1_t { } #[doc = "Signed compare 
bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1744,7 +1744,7 @@ pub fn vceqzq_p64(a: poly64x2_t) -> uint64x2_t { } #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1754,7 +1754,7 @@ pub fn vceqz_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1764,7 +1764,7 @@ pub fn vceqzq_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1774,7 +1774,7 @@ pub fn vceqz_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1784,7 +1784,7 @@ pub fn vceqzq_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Unsigned compare bitwise equal to zero"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1794,7 +1794,7 @@ pub fn vceqz_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1804,7 +1804,7 @@ pub fn vceqzq_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqz_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1814,7 +1814,7 @@ pub fn vceqz_u64(a: uint64x1_t) -> uint64x1_t { } #[doc = "Unsigned compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmeq))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1824,7 +1824,7 @@ pub fn vceqzq_u64(a: uint64x2_t) -> uint64x2_t { } #[doc = "Compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1833,7 +1833,7 @@ pub fn vceqzd_s64(a: i64) -> u64 { } #[doc = "Compare bitwise equal to zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1842,7 +1842,7 @@ pub fn vceqzd_u64(a: u64) -> u64 { } #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -1852,7 +1852,7 @@ pub fn vceqzh_f16(a: f16) -> u16 { } #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1861,7 +1861,7 @@ pub fn vceqzs_f32(a: f32) -> u32 { } #[doc = "Floating-point compare bitwise equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqzd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1870,7 +1870,7 @@ pub fn vceqzd_f64(a: f64) -> u64 { } #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1879,7 +1879,7 @@ pub fn vcge_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1888,7 +1888,7 @@ pub fn vcgeq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1897,7 +1897,7 @@ pub fn vcge_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1906,7 +1906,7 @@ pub fn vcgeq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1915,7 +1915,7 @@ pub fn vcge_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1924,7 +1924,7 @@ pub fn vcgeq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Floating-point 
compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1933,7 +1933,7 @@ pub fn vcged_f64(a: f64, b: f64) -> u64 { } #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcges_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1942,7 +1942,7 @@ pub fn vcges_f32(a: f32, b: f32) -> u32 { } #[doc = "Compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1951,7 +1951,7 @@ pub fn vcged_s64(a: i64, b: i64) -> u64 { } #[doc = "Compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcged_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1960,7 +1960,7 @@ pub fn vcged_u64(a: u64, b: u64) -> u64 { } #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -1970,7 +1970,7 @@ pub fn vcgeh_f16(a: f16, b: f16) -> u16 { } #[doc = "Floating-point compare greater than or equal to zero"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1980,7 +1980,7 @@ pub fn vcgez_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -1990,7 +1990,7 @@ pub fn vcgezq_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2000,7 +2000,7 @@ pub fn vcgez_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2010,7 +2010,7 @@ pub fn vcgezq_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2020,7 +2020,7 @@ pub fn vcgez_s8(a: int8x8_t) -> uint8x8_t { } #[doc = "Compare signed greater 
than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2030,7 +2030,7 @@ pub fn vcgezq_s8(a: int8x16_t) -> uint8x16_t { } #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2040,7 +2040,7 @@ pub fn vcgez_s16(a: int16x4_t) -> uint16x4_t { } #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2050,7 +2050,7 @@ pub fn vcgezq_s16(a: int16x8_t) -> uint16x8_t { } #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2060,7 +2060,7 @@ pub fn vcgez_s32(a: int32x2_t) -> uint32x2_t { } #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2070,7 +2070,7 @@ pub fn vcgezq_s32(a: int32x4_t) -> uint32x4_t { } #[doc = "Compare signed greater 
than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2080,7 +2080,7 @@ pub fn vcgez_s64(a: int64x1_t) -> uint64x1_t { } #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2090,7 +2090,7 @@ pub fn vcgezq_s64(a: int64x2_t) -> uint64x2_t { } #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2099,7 +2099,7 @@ pub fn vcgezd_f64(a: f64) -> u64 { } #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2108,7 +2108,7 @@ pub fn vcgezs_f32(a: f32) -> u32 { } #[doc = "Compare signed greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2117,7 +2117,7 @@ pub fn vcgezd_s64(a: i64) -> u64 { } #[doc = "Floating-point compare greater than or 
equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -2127,7 +2127,7 @@ pub fn vcgezh_f16(a: f16) -> u16 { } #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2136,7 +2136,7 @@ pub fn vcgt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2145,7 +2145,7 @@ pub fn vcgtq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2154,7 +2154,7 @@ pub fn vcgt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2163,7 +2163,7 @@ pub fn vcgtq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Compare unsigned greater 
than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhi))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2172,7 +2172,7 @@ pub fn vcgt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhi))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2181,7 +2181,7 @@ pub fn vcgtq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2190,7 +2190,7 @@ pub fn vcgtd_f64(a: f64, b: f64) -> u64 { } #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgts_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2199,7 +2199,7 @@ pub fn vcgts_f32(a: f32, b: f32) -> u32 { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2208,7 +2208,7 @@ pub fn vcgtd_s64(a: i64, b: i64) -> u64 { } #[doc = "Compare greater than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2217,7 +2217,7 @@ pub fn vcgtd_u64(a: u64, b: u64) -> u64 { } #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgth_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -2227,7 +2227,7 @@ pub fn vcgth_f16(a: f16, b: f16) -> u16 { } #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2237,7 +2237,7 @@ pub fn vcgtz_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2247,7 +2247,7 @@ pub fn vcgtzq_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2257,7 +2257,7 @@ pub fn vcgtz_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2267,7 +2267,7 @@ pub fn vcgtzq_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2277,7 +2277,7 @@ pub fn vcgtz_s8(a: int8x8_t) -> uint8x8_t { } #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2287,7 +2287,7 @@ pub fn vcgtzq_s8(a: int8x16_t) -> uint8x16_t { } #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2297,7 +2297,7 @@ pub fn vcgtz_s16(a: int16x4_t) -> uint16x4_t { } #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2307,7 +2307,7 @@ pub fn vcgtzq_s16(a: int16x8_t) -> uint16x8_t { } #[doc = "Compare signed greater than zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2317,7 +2317,7 @@ pub fn vcgtz_s32(a: int32x2_t) -> uint32x2_t { } #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2327,7 +2327,7 @@ pub fn vcgtzq_s32(a: int32x4_t) -> uint32x4_t { } #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2337,7 +2337,7 @@ pub fn vcgtz_s64(a: int64x1_t) -> uint64x1_t { } #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2347,7 +2347,7 @@ pub fn vcgtzq_s64(a: int64x2_t) -> uint64x2_t { } #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2356,7 +2356,7 @@ pub fn vcgtzd_f64(a: f64) -> u64 { } #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2365,7 +2365,7 @@ pub fn vcgtzs_f32(a: f32) -> u32 { } #[doc = "Compare signed greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2374,7 +2374,7 @@ pub fn vcgtzd_s64(a: i64) -> u64 { } #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -2384,7 +2384,7 @@ pub fn vcgtzh_f16(a: f16) -> u16 { } #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2393,7 +2393,7 @@ pub fn vcle_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2402,7 +2402,7 @@ pub fn vcleq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } #[doc = "Compare signed less than or equal"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2411,7 +2411,7 @@ pub fn vcle_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmge))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2420,7 +2420,7 @@ pub fn vcleq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2429,7 +2429,7 @@ pub fn vcle_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2438,7 +2438,7 @@ pub fn vcleq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2447,7 +2447,7 @@ pub fn vcled_f64(a: f64, b: f64) -> u64 { } #[doc = "Floating-point compare less than or equal"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcles_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2456,7 +2456,7 @@ pub fn vcles_f32(a: f32, b: f32) -> u32 { } #[doc = "Compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2465,7 +2465,7 @@ pub fn vcled_u64(a: u64, b: u64) -> u64 { } #[doc = "Compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcled_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2474,7 +2474,7 @@ pub fn vcled_s64(a: i64, b: i64) -> u64 { } #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -2484,7 +2484,7 @@ pub fn vcleh_f16(a: f16, b: f16) -> u16 { } #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2494,7 +2494,7 @@ pub fn vclez_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2504,7 +2504,7 @@ pub fn vclezq_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2514,7 +2514,7 @@ pub fn vclez_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2524,7 +2524,7 @@ pub fn vclezq_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2534,7 +2534,7 @@ pub fn vclez_s8(a: int8x8_t) -> uint8x8_t { } #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2544,7 +2544,7 @@ pub fn vclezq_s8(a: int8x16_t) -> uint8x16_t { } #[doc = "Compare signed less than or equal to zero"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2554,7 +2554,7 @@ pub fn vclez_s16(a: int16x4_t) -> uint16x4_t { } #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2564,7 +2564,7 @@ pub fn vclezq_s16(a: int16x8_t) -> uint16x8_t { } #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2574,7 +2574,7 @@ pub fn vclez_s32(a: int32x2_t) -> uint32x2_t { } #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2584,7 +2584,7 @@ pub fn vclezq_s32(a: int32x4_t) -> uint32x4_t { } #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2594,7 +2594,7 @@ pub fn vclez_s64(a: int64x1_t) -> uint64x1_t { } #[doc = "Compare signed less than or equal to zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmle))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2604,7 +2604,7 @@ pub fn vclezq_s64(a: int64x2_t) -> uint64x2_t { } #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2613,7 +2613,7 @@ pub fn vclezd_f64(a: f64) -> u64 { } #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2622,7 +2622,7 @@ pub fn vclezs_f32(a: f32) -> u32 { } #[doc = "Compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2631,7 +2631,7 @@ pub fn vclezd_s64(a: i64) -> u64 { } #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -2641,7 +2641,7 @@ pub fn vclezh_f16(a: f16) -> u16 { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2650,7 +2650,7 @@ pub fn vclt_f64(a: float64x1_t, b: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2659,7 +2659,7 @@ pub fn vcltq_f64(a: float64x2_t, b: float64x2_t) -> uint64x2_t { } #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2668,7 +2668,7 @@ pub fn vclt_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmgt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2677,7 +2677,7 @@ pub fn vcltq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhi))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2686,7 +2686,7 @@ pub fn vclt_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Compare unsigned less than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmhi))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2695,7 +2695,7 @@ pub fn vcltq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2704,7 +2704,7 @@ pub fn vcltd_u64(a: u64, b: u64) -> u64 { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2713,7 +2713,7 @@ pub fn vcltd_s64(a: i64, b: i64) -> u64 { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclth_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -2723,7 +2723,7 @@ pub fn vclth_f16(a: f16, b: f16) -> u16 { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclts_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2732,7 +2732,7 @@ pub fn vclts_f32(a: f32, b: f32) -> u32 { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2741,7 +2741,7 @@ pub fn vcltd_f64(a: f64, b: f64) -> u64 { } #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2751,7 +2751,7 @@ pub fn vcltz_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2761,7 +2761,7 @@ pub fn vcltzq_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2771,7 +2771,7 @@ pub fn vcltz_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2781,7 +2781,7 @@ pub fn vcltzq_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Compare signed less than zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2791,7 +2791,7 @@ pub fn vcltz_s8(a: int8x8_t) -> uint8x8_t { } #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2801,7 +2801,7 @@ pub fn vcltzq_s8(a: int8x16_t) -> uint8x16_t { } #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2811,7 +2811,7 @@ pub fn vcltz_s16(a: int16x4_t) -> uint16x4_t { } #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2821,7 +2821,7 @@ pub fn vcltzq_s16(a: int16x8_t) -> uint16x8_t { } #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2831,7 +2831,7 @@ pub fn vcltz_s32(a: int32x2_t) -> uint32x2_t { } #[doc = "Compare signed less than zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2841,7 +2841,7 @@ pub fn vcltzq_s32(a: int32x4_t) -> uint32x4_t { } #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2851,7 +2851,7 @@ pub fn vcltz_s64(a: int64x1_t) -> uint64x1_t { } #[doc = "Compare signed less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmlt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2861,7 +2861,7 @@ pub fn vcltzq_s64(a: int64x2_t) -> uint64x2_t { } #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2870,7 +2870,7 @@ pub fn vcltzd_f64(a: f64) -> u64 { } #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2879,7 +2879,7 @@ pub fn vcltzs_f32(a: f32) -> u32 { } #[doc = "Compare less than zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(asr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -2888,7 +2888,7 @@ pub fn vcltzd_s64(a: i64) -> u64 { } #[doc = "Floating-point compare less than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcmp))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -2898,7 +2898,7 @@ pub fn vcltzh_f16(a: f16) -> u16 { } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -2916,7 +2916,7 @@ pub fn vcmla_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -2934,7 +2934,7 @@ pub fn vcmlaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -2950,7 +2950,7 @@ pub fn vcmla_f32(a: float32x2_t, b: float32x2_t, c: 
float32x2_t) -> float32x2_t } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -2966,7 +2966,7 @@ pub fn vcmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -2982,7 +2982,7 @@ pub fn vcmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3011,7 +3011,7 @@ pub fn vcmla_lane_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3044,7 +3044,7 @@ pub fn vcmlaq_lane_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3062,7 
+3062,7 @@ pub fn vcmla_lane_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3089,7 +3089,7 @@ pub fn vcmlaq_lane_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3118,7 +3118,7 @@ pub fn vcmla_laneq_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3151,7 +3151,7 @@ pub fn vcmlaq_laneq_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3169,7 +3169,7 @@ pub fn vcmla_laneq_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3196,7 +3196,7 @@ pub fn vcmlaq_laneq_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -3214,7 +3214,7 @@ pub fn vcmla_rot180_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -3232,7 +3232,7 @@ pub fn vcmlaq_rot180_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> floa } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3248,7 +3248,7 @@ pub fn vcmla_rot180_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3264,7 +3264,7 @@ pub fn vcmlaq_rot180_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = 
"stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3280,7 +3280,7 @@ pub fn vcmlaq_rot180_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3309,7 +3309,7 @@ pub fn vcmla_rot180_lane_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3342,7 +3342,7 @@ pub fn vcmlaq_rot180_lane_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3360,7 +3360,7 @@ pub fn vcmla_rot180_lane_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3387,7 +3387,7 @@ pub fn vcmlaq_rot180_lane_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, 
assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3416,7 +3416,7 @@ pub fn vcmla_rot180_laneq_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3449,7 +3449,7 @@ pub fn vcmlaq_rot180_laneq_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot180_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3467,7 +3467,7 @@ pub fn vcmla_rot180_laneq_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot180_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3494,7 +3494,7 @@ pub fn vcmlaq_rot180_laneq_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -3512,7 +3512,7 @@ pub fn vcmla_rot270_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] 
#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -3530,7 +3530,7 @@ pub fn vcmlaq_rot270_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> floa } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3546,7 +3546,7 @@ pub fn vcmla_rot270_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3562,7 +3562,7 @@ pub fn vcmlaq_rot270_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> floa } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3578,7 +3578,7 @@ pub fn vcmlaq_rot270_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> floa } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3607,7 +3607,7 @@ pub fn vcmla_rot270_lane_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3640,7 +3640,7 @@ pub fn vcmlaq_rot270_lane_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3658,7 +3658,7 @@ pub fn vcmla_rot270_lane_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3685,7 +3685,7 @@ pub fn vcmlaq_rot270_lane_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3714,7 +3714,7 @@ pub fn vcmla_rot270_laneq_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3747,7 +3747,7 @@ pub fn vcmlaq_rot270_laneq_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot270_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3765,7 +3765,7 @@ pub fn vcmla_rot270_laneq_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot270_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3792,7 +3792,7 @@ pub fn vcmlaq_rot270_laneq_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -3810,7 +3810,7 @@ pub fn vcmla_rot90_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float1 } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] @@ -3828,7 +3828,7 @@ pub fn vcmlaq_rot90_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3844,7 +3844,7 @@ pub fn vcmla_rot90_f32(a: 
float32x2_t, b: float32x2_t, c: float32x2_t) -> float3 } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3860,7 +3860,7 @@ pub fn vcmlaq_rot90_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[unstable(feature = "stdarch_neon_fcma", issue = "117222")] #[cfg_attr(test, assert_instr(fcmla))] @@ -3876,7 +3876,7 @@ pub fn vcmlaq_rot90_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3905,7 +3905,7 @@ pub fn vcmla_rot90_lane_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3938,7 +3938,7 @@ pub fn vcmlaq_rot90_lane_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, 
assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3956,7 +3956,7 @@ pub fn vcmla_rot90_lane_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -3983,7 +3983,7 @@ pub fn vcmlaq_rot90_lane_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -4012,7 +4012,7 @@ pub fn vcmla_rot90_laneq_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -4045,7 +4045,7 @@ pub fn vcmlaq_rot90_laneq_f16( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmla_rot90_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -4063,7 +4063,7 @@ pub fn vcmla_rot90_laneq_f32( } #[doc = "Floating-point complex multiply accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcmlaq_rot90_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fcma")] #[cfg_attr(test, assert_instr(fcmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ 
-4090,7 +4090,7 @@ pub fn vcmlaq_rot90_laneq_f32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4111,7 +4111,7 @@ pub fn vcopy_lane_f32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4135,7 +4135,7 @@ pub fn vcopy_lane_s8(a: int8x8_t, b: int8x8_ } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4155,7 +4155,7 @@ pub fn vcopy_lane_s16(a: int16x4_t, b: int16 } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4173,7 +4173,7 @@ pub fn vcopy_lane_s32(a: int32x2_t, b: int32 } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] 
@@ -4197,7 +4197,7 @@ pub fn vcopy_lane_u8(a: uint8x8_t, b: uint8x } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4220,7 +4220,7 @@ pub fn vcopy_lane_u16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4241,7 +4241,7 @@ pub fn vcopy_lane_u32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4265,7 +4265,7 @@ pub fn vcopy_lane_p8(a: poly8x8_t, b: poly8x } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_lane_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4288,7 +4288,7 @@ pub fn vcopy_lane_p16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4310,7 +4310,7 @@ pub 
fn vcopy_laneq_f32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4336,7 +4336,7 @@ pub fn vcopy_laneq_s8(a: int8x8_t, b: int8x1 } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4360,7 +4360,7 @@ pub fn vcopy_laneq_s16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4382,7 +4382,7 @@ pub fn vcopy_laneq_s32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4411,7 +4411,7 @@ pub fn vcopy_laneq_u8( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4435,7 +4435,7 @@ pub fn vcopy_laneq_u16( } #[doc = "Insert 
vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4457,7 +4457,7 @@ pub fn vcopy_laneq_u32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4486,7 +4486,7 @@ pub fn vcopy_laneq_p8( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopy_laneq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4510,7 +4510,7 @@ pub fn vcopy_laneq_p16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4534,7 +4534,7 @@ pub fn vcopyq_lane_f32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4556,7 +4556,7 @@ pub fn vcopyq_lane_f64( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4578,7 +4578,7 @@ pub fn vcopyq_lane_s64( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4600,7 +4600,7 @@ pub fn vcopyq_lane_u64( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 1, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4622,7 +4622,7 @@ pub fn vcopyq_lane_p64( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -4992,7 +4992,7 @@ pub fn vcopyq_lane_s8(a: int8x16_t, b: int8x } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -5020,7 +5020,7 @@ pub fn vcopyq_lane_s16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -5044,7 +5044,7 @@ pub fn vcopyq_lane_s32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -5417,7 +5417,7 @@ pub fn vcopyq_lane_u8( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -5445,7 +5445,7 @@ pub fn vcopyq_lane_u16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -5469,7 +5469,7 @@ pub fn vcopyq_lane_u32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -5842,7 +5842,7 @@ pub fn vcopyq_lane_p8( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_lane_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -5870,7 +5870,7 @@ pub fn vcopyq_lane_p16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -5893,7 +5893,7 @@ pub fn vcopyq_laneq_f32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -5914,7 +5914,7 @@ pub fn vcopyq_laneq_f64( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -6285,7 +6285,7 @@ pub fn vcopyq_laneq_s8( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -6312,7 +6312,7 @@ pub fn vcopyq_laneq_s16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -6335,7 +6335,7 @@ pub fn vcopyq_laneq_s32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -6356,7 +6356,7 @@ pub fn vcopyq_laneq_s64( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -6727,7 +6727,7 @@ pub fn vcopyq_laneq_u8( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -6754,7 +6754,7 @@ pub fn vcopyq_laneq_u16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -6777,7 +6777,7 @@ pub fn vcopyq_laneq_u32( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -6798,7 +6798,7 @@ pub fn vcopyq_laneq_u64( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -7169,7 +7169,7 @@ pub fn vcopyq_laneq_p8( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -7196,7 +7196,7 @@ pub fn vcopyq_laneq_p16( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcopyq_laneq_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(mov, LANE1 = 0, LANE2 = 0))] #[rustc_legacy_const_generics(1, 3)] @@ -7217,7 +7217,7 @@ pub fn vcopyq_laneq_p64( } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7226,7 +7226,7 @@ pub fn vcreate_f64(a: u64) -> float64x1_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7235,7 +7235,7 @@ pub fn vcvt_f32_f64(a: float64x2_t) -> float32x2_t { } #[doc = "Floating-point convert to higher precision long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7244,7 +7244,7 @@ pub fn vcvt_f64_f32(a: float32x2_t) -> float64x2_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7253,7 +7253,7 @@ pub fn vcvt_f64_s64(a: int64x1_t) -> float64x1_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7262,7 +7262,7 @@ pub fn vcvtq_f64_s64(a: int64x2_t) -> float64x2_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f64_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7271,7 +7271,7 @@ pub fn vcvt_f64_u64(a: uint64x1_t) -> float64x1_t { } #[doc = "Fixed-point convert to 
floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f64_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7280,7 +7280,7 @@ pub fn vcvtq_f64_u64(a: uint64x2_t) -> float64x2_t { } #[doc = "Floating-point convert to lower precision"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f16_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtn2))] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -7290,7 +7290,7 @@ pub fn vcvt_high_f16_f32(a: float16x4_t, b: float32x4_t) -> float16x8_t { } #[doc = "Floating-point convert to higher precision"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtl2))] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -7300,7 +7300,7 @@ pub fn vcvt_high_f32_f16(a: float16x8_t) -> float32x4_t { } #[doc = "Floating-point convert to lower precision narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f32_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7309,7 +7309,7 @@ pub fn vcvt_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { } #[doc = "Floating-point convert to higher precision long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_high_f64_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtl2))] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] @@ -7321,7 +7321,7 @@ pub fn vcvt_high_f64_f32(a: float32x4_t) -> float64x2_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] @@ -7339,7 +7339,7 @@ pub fn vcvt_n_f64_s64(a: int64x1_t) -> float64x1_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] @@ -7357,7 +7357,7 @@ pub fn vcvtq_n_f64_s64(a: int64x2_t) -> float64x2_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f64_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] @@ -7375,7 +7375,7 @@ pub fn vcvt_n_f64_u64(a: uint64x1_t) -> float64x1_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f64_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] @@ -7393,7 +7393,7 @@ pub fn vcvtq_n_f64_u64(a: uint64x2_t) -> float64x2_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] @@ -7411,7 
+7411,7 @@ pub fn vcvt_n_s64_f64(a: float64x1_t) -> int64x1_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] @@ -7429,7 +7429,7 @@ pub fn vcvtq_n_s64_f64(a: float64x2_t) -> int64x2_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] @@ -7447,7 +7447,7 @@ pub fn vcvt_n_u64_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] @@ -7465,7 +7465,7 @@ pub fn vcvtq_n_u64_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7481,7 +7481,7 @@ pub fn vcvt_s64_f64(a: float64x1_t) -> int64x1_t { } #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7497,7 +7497,7 @@ pub fn vcvtq_s64_f64(a: float64x2_t) -> int64x2_t { } #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7513,7 +7513,7 @@ pub fn vcvt_u64_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7529,7 +7529,7 @@ pub fn vcvtq_u64_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -7546,7 +7546,7 @@ pub fn vcvta_s16_f16(a: float16x4_t) -> int16x4_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -7563,7 +7563,7 @@ pub fn vcvtaq_s16_f16(a: float16x8_t) -> int16x8_t { } #[doc = "Floating-point convert to signed integer, 
rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7579,7 +7579,7 @@ pub fn vcvta_s32_f32(a: float32x2_t) -> int32x2_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7595,7 +7595,7 @@ pub fn vcvtaq_s32_f32(a: float32x4_t) -> int32x4_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7611,7 +7611,7 @@ pub fn vcvta_s64_f64(a: float64x1_t) -> int64x1_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7627,7 +7627,7 @@ pub fn vcvtaq_s64_f64(a: float64x2_t) -> int64x2_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, 
assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -7644,7 +7644,7 @@ pub fn vcvta_u16_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -7661,7 +7661,7 @@ pub fn vcvtaq_u16_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7677,7 +7677,7 @@ pub fn vcvta_u32_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7693,7 +7693,7 @@ pub fn vcvtaq_u32_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvta_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7709,7 +7709,7 @@ pub fn vcvta_u64_f64(a: float64x1_t) -> uint64x1_t { 
} #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtaq_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7725,7 +7725,7 @@ pub fn vcvtaq_u64_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7735,7 +7735,7 @@ pub fn vcvtah_s16_f16(a: f16) -> i16 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7752,7 +7752,7 @@ pub fn vcvtah_s32_f16(a: f16) -> i32 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_s64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtas))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7769,7 +7769,7 @@ pub fn vcvtah_s64_f16(a: f16) -> i64 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u16_f16)"] -#[inline(always)] +#[inline] 
#[cfg_attr(test, assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7779,7 +7779,7 @@ pub fn vcvtah_u16_f16(a: f16) -> u16 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7796,7 +7796,7 @@ pub fn vcvtah_u32_f16(a: f16) -> u32 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtah_u64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtau))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7813,7 +7813,7 @@ pub fn vcvtah_u64_f16(a: f16) -> u64 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7829,7 +7829,7 @@ pub fn vcvtas_s32_f32(a: f32) -> i32 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtas))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7845,7 +7845,7 @@ pub fn vcvtad_s64_f64(a: f64) -> i64 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to 
away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtas_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7861,7 +7861,7 @@ pub fn vcvtas_u32_f32(a: f32) -> u32 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtad_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtau))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7877,7 +7877,7 @@ pub fn vcvtad_u64_f64(a: f64) -> u64 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7886,7 +7886,7 @@ pub fn vcvtd_f64_s64(a: i64) -> f64 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -7895,7 +7895,7 @@ pub fn vcvts_f32_s32(a: i32) -> f32 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(scvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7905,7 +7905,7 @@ pub fn vcvth_f16_s16(a: i16) -> f16 { } #[doc = "Fixed-point convert to floating-point"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s32)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(scvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7915,7 +7915,7 @@ pub fn vcvth_f16_s32(a: i32) -> f16 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_s64)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(scvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7925,7 +7925,7 @@ pub fn vcvth_f16_s64(a: i64) -> f16 { } #[doc = "Unsigned fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(ucvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7935,7 +7935,7 @@ pub fn vcvth_f16_u16(a: u16) -> f16 { } #[doc = "Unsigned fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u32)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(ucvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7945,7 +7945,7 @@ pub fn vcvth_f16_u32(a: u32) -> f16 { } #[doc = "Unsigned fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_f16_u64)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(ucvtf))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -7955,7 +7955,7 @@ pub fn vcvth_f16_u64(a: u64) -> f16 { } #[doc = "Fixed-point convert to 
floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -7967,7 +7967,7 @@ pub fn vcvth_n_f16_s16(a: i16) -> f16 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s32)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -7986,7 +7986,7 @@ pub fn vcvth_n_f16_s32(a: i32) -> f16 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_s64)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8005,7 +8005,7 @@ pub fn vcvth_n_f16_s64(a: i64) -> f16 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8017,7 +8017,7 @@ pub fn vcvth_n_f16_u16(a: u16) -> f16 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u32)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8036,7 +8036,7 @@ pub fn vcvth_n_f16_u32(a: u32) -> f16 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_f16_u64)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8055,7 +8055,7 @@ pub fn vcvth_n_f16_u64(a: u64) -> f16 { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8067,7 +8067,7 @@ pub fn vcvth_n_s16_f16(a: f16) -> i16 { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8086,7 +8086,7 @@ pub fn vcvth_n_s32_f16(a: f16) -> i32 { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_s64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8105,7 +8105,7 @@ pub fn vcvth_n_s64_f16(a: f16) -> i64 { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8117,7 +8117,7 @@ pub fn vcvth_n_u16_f16(a: f16) -> u16 { } #[doc = "Floating-point convert to fixed-point, rounding toward 
zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8136,7 +8136,7 @@ pub fn vcvth_n_u32_f16(a: f16) -> u32 { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_n_u64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -8155,7 +8155,7 @@ pub fn vcvth_n_u64_f16(a: f16) -> u64 { } #[doc = "Floating-point convert to signed fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzs))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8165,7 +8165,7 @@ pub fn vcvth_s16_f16(a: f16) -> i16 { } #[doc = "Floating-point convert to signed fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzs))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8175,7 +8175,7 @@ pub fn vcvth_s32_f16(a: f16) -> i32 { } #[doc = "Floating-point convert to signed fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_s64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzs))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8185,7 +8185,7 @@ pub fn vcvth_s64_f16(a: f16) -> i64 { } #[doc = "Floating-point convert to unsigned 
fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8195,7 +8195,7 @@ pub fn vcvth_u16_f16(a: f16) -> u16 { } #[doc = "Floating-point convert to unsigned fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8205,7 +8205,7 @@ pub fn vcvth_u32_f16(a: f16) -> u32 { } #[doc = "Floating-point convert to unsigned fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvth_u64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtzu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8215,7 +8215,7 @@ pub fn vcvth_u64_f16(a: f16) -> u64 { } #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8232,7 +8232,7 @@ pub fn vcvtm_s16_f16(a: float16x4_t) -> int16x4_t { } #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8249,7 
+8249,7 @@ pub fn vcvtmq_s16_f16(a: float16x8_t) -> int16x8_t { } #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8265,7 +8265,7 @@ pub fn vcvtm_s32_f32(a: float32x2_t) -> int32x2_t { } #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8281,7 +8281,7 @@ pub fn vcvtmq_s32_f32(a: float32x4_t) -> int32x4_t { } #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8297,7 +8297,7 @@ pub fn vcvtm_s64_f64(a: float64x1_t) -> int64x1_t { } #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8313,7 +8313,7 @@ pub fn vcvtmq_s64_f64(a: float64x2_t) -> int64x2_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8330,7 +8330,7 @@ pub fn vcvtm_u16_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8347,7 +8347,7 @@ pub fn vcvtmq_u16_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8363,7 +8363,7 @@ pub fn vcvtm_u32_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8379,7 +8379,7 @@ pub fn vcvtmq_u32_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtm_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8395,7 +8395,7 @@ pub fn vcvtm_u64_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmq_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8411,7 +8411,7 @@ pub fn vcvtmq_u64_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point convert to integer, rounding towards minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8421,7 +8421,7 @@ pub fn vcvtmh_s16_f16(a: f16) -> i16 { } #[doc = "Floating-point convert to integer, rounding towards minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8438,7 +8438,7 @@ pub fn vcvtmh_s32_f16(a: f16) -> i32 { } #[doc = "Floating-point convert to integer, rounding towards minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_s64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtms))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8455,7 +8455,7 @@ pub fn vcvtmh_s64_f16(a: f16) -> i64 { } #[doc = "Floating-point convert to integer, rounding towards minus infinity"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8465,7 +8465,7 @@ pub fn vcvtmh_u16_f16(a: f16) -> u16 { } #[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8482,7 +8482,7 @@ pub fn vcvtmh_u32_f16(a: f16) -> u32 { } #[doc = "Floating-point convert to unsigned integer, rounding towards minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmh_u64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtmu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8499,7 +8499,7 @@ pub fn vcvtmh_u64_f16(a: f16) -> u64 { } #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8515,7 +8515,7 @@ pub fn vcvtms_s32_f32(a: f32) -> i32 { } #[doc = "Floating-point convert to signed integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtms))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ 
-8531,7 +8531,7 @@ pub fn vcvtmd_s64_f64(a: f64) -> i64 { } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtms_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8547,7 +8547,7 @@ pub fn vcvtms_u32_f32(a: f32) -> u32 { } #[doc = "Floating-point convert to unsigned integer, rounding toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtmd_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtmu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8563,7 +8563,7 @@ pub fn vcvtmd_u64_f64(a: f64) -> u64 { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8580,7 +8580,7 @@ pub fn vcvtn_s16_f16(a: float16x4_t) -> int16x4_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8597,7 +8597,7 @@ pub fn vcvtnq_s16_f16(a: float16x8_t) -> int16x8_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8613,7 +8613,7 @@ pub fn vcvtn_s32_f32(a: float32x2_t) -> int32x2_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8629,7 +8629,7 @@ pub fn vcvtnq_s32_f32(a: float32x4_t) -> int32x4_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8645,7 +8645,7 @@ pub fn vcvtn_s64_f64(a: float64x1_t) -> int64x1_t { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8661,7 +8661,7 @@ pub fn vcvtnq_s64_f64(a: float64x2_t) -> int64x2_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtnu))] #[target_feature(enable = "neon,fp16")] 
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8678,7 +8678,7 @@ pub fn vcvtn_u16_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtnu))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8695,7 +8695,7 @@ pub fn vcvtnq_u16_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8711,7 +8711,7 @@ pub fn vcvtn_u32_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8727,7 +8727,7 @@ pub fn vcvtnq_u32_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtn_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8743,7 +8743,7 @@ pub fn vcvtn_u64_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point convert to unsigned integer, 
rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnq_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8759,7 +8759,7 @@ pub fn vcvtnq_u64_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8769,7 +8769,7 @@ pub fn vcvtnh_s16_f16(a: f16) -> i16 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8786,7 +8786,7 @@ pub fn vcvtnh_s32_f16(a: f16) -> i32 { } #[doc = "Floating-point convert to integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_s64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtns))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8803,7 +8803,7 @@ pub fn vcvtnh_s64_f16(a: f16) -> i64 { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtnu))] 
#[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8813,7 +8813,7 @@ pub fn vcvtnh_u16_f16(a: f16) -> u16 { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtnu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8830,7 +8830,7 @@ pub fn vcvtnh_u32_f16(a: f16) -> u32 { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnh_u64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtnu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -8847,7 +8847,7 @@ pub fn vcvtnh_u64_f16(a: f16) -> u64 { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8863,7 +8863,7 @@ pub fn vcvtns_s32_f32(a: f32) -> i32 { } #[doc = "Floating-point convert to signed integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtns))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8879,7 +8879,7 @@ pub fn vcvtnd_s64_f64(a: f64) -> i64 { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties 
to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtns_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8895,7 +8895,7 @@ pub fn vcvtns_u32_f32(a: f32) -> u32 { } #[doc = "Floating-point convert to unsigned integer, rounding to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtnd_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtnu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8911,7 +8911,7 @@ pub fn vcvtnd_u64_f64(a: f64) -> u64 { } #[doc = "Floating-point convert to signed integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8928,7 +8928,7 @@ pub fn vcvtp_s16_f16(a: float16x4_t) -> int16x4_t { } #[doc = "Floating-point convert to signed integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -8945,7 +8945,7 @@ pub fn vcvtpq_s16_f16(a: float16x8_t) -> int16x8_t { } #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] @@ -8961,7 +8961,7 @@ pub fn vcvtp_s32_f32(a: float32x2_t) -> int32x2_t { } #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8977,7 +8977,7 @@ pub fn vcvtpq_s32_f32(a: float32x4_t) -> int32x4_t { } #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -8993,7 +8993,7 @@ pub fn vcvtp_s64_f64(a: float64x1_t) -> int64x1_t { } #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9009,7 +9009,7 @@ pub fn vcvtpq_s64_f64(a: float64x2_t) -> int64x2_t { } #[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -9026,7 +9026,7 @@ pub fn vcvtp_u16_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -9043,7 +9043,7 @@ pub fn vcvtpq_u16_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9059,7 +9059,7 @@ pub fn vcvtp_u32_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9075,7 +9075,7 @@ pub fn vcvtpq_u32_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtp_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9091,7 +9091,7 @@ pub fn vcvtp_u64_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpq_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] @@ -9107,7 +9107,7 @@ pub fn vcvtpq_u64_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Floating-point convert to integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -9117,7 +9117,7 @@ pub fn vcvtph_s16_f16(a: f16) -> i16 { } #[doc = "Floating-point convert to integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -9134,7 +9134,7 @@ pub fn vcvtph_s32_f16(a: f16) -> i32 { } #[doc = "Floating-point convert to integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_s64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -9151,7 +9151,7 @@ pub fn vcvtph_s64_f16(a: f16) -> i64 { } #[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -9161,7 +9161,7 @@ pub fn vcvtph_u16_f16(a: f16) -> u16 { } #[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u32_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -9178,7 +9178,7 @@ pub fn vcvtph_u32_f16(a: f16) -> u32 { } #[doc = "Floating-point convert to unsigned integer, rounding to plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtph_u64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fcvtpu))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -9195,7 +9195,7 @@ pub fn vcvtph_u64_f16(a: f16) -> u64 { } #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9211,7 +9211,7 @@ pub fn vcvtps_s32_f32(a: f32) -> i32 { } #[doc = "Floating-point convert to signed integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9227,7 +9227,7 @@ pub fn vcvtpd_s64_f64(a: f64) -> i64 { } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtps_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9243,7 +9243,7 
@@ pub fn vcvtps_u32_f32(a: f32) -> u32 { } #[doc = "Floating-point convert to unsigned integer, rounding toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtpd_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtpu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9259,7 +9259,7 @@ pub fn vcvtpd_u64_f64(a: f64) -> u64 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_f32_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9268,7 +9268,7 @@ pub fn vcvts_f32_u32(a: u32) -> f32 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_f64_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9277,7 +9277,7 @@ pub fn vcvtd_f64_u64(a: u64) -> f64 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9295,7 +9295,7 @@ pub fn vcvts_n_f32_s32(a: i32) -> f32 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(scvtf, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9313,7 +9313,7 @@ pub fn vcvtd_n_f64_s64(a: i64) -> f64 { } 
#[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_f32_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9331,7 +9331,7 @@ pub fn vcvts_n_f32_u32(a: u32) -> f32 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_f64_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9349,7 +9349,7 @@ pub fn vcvtd_n_f64_u64(a: u64) -> f64 { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9367,7 +9367,7 @@ pub fn vcvts_n_s32_f32(a: f32) -> i32 { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9385,7 +9385,7 @@ pub fn vcvtd_n_s64_f64(a: f64) -> i64 { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_n_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9403,7 +9403,7 @@ pub fn vcvts_n_u32_f32(a: f32) -> u32 { } #[doc = "Floating-point convert to fixed-point, 
rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_n_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9421,7 +9421,7 @@ pub fn vcvtd_n_u64_f64(a: f64) -> u64 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9430,7 +9430,7 @@ pub fn vcvts_s32_f32(a: f32) -> i32 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzs))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9439,7 +9439,7 @@ pub fn vcvtd_s64_f64(a: f64) -> i64 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvts_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9448,7 +9448,7 @@ pub fn vcvts_u32_f32(a: f32) -> u32 { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtd_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtzu))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9457,7 +9457,7 @@ pub fn vcvtd_u64_f64(a: f64) -> u64 { } #[doc = "Floating-point convert to lower precision narrow, rounding to odd"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_f32_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9473,7 +9473,7 @@ pub fn vcvtx_f32_f64(a: float64x2_t) -> float32x2_t { } #[doc = "Floating-point convert to lower precision narrow, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtx_high_f32_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtxn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9482,7 +9482,7 @@ pub fn vcvtx_high_f32_f64(a: float32x2_t, b: float64x2_t) -> float32x4_t { } #[doc = "Floating-point convert to lower precision narrow, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtxd_f32_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fcvtxn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -9491,7 +9491,7 @@ pub fn vcvtxd_f32_f64(a: f64) -> f32 { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -9501,7 +9501,7 @@ pub fn vdiv_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -9511,7 +9511,7 @@ pub fn vdivq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = 
"Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fdiv))] @@ -9520,7 +9520,7 @@ pub fn vdiv_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fdiv))] @@ -9529,7 +9529,7 @@ pub fn vdivq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdiv_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fdiv))] @@ -9538,7 +9538,7 @@ pub fn vdiv_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fdiv))] @@ -9547,7 +9547,7 @@ pub fn vdivq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdivh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -9557,7 +9557,7 @@ pub fn vdivh_f16(a: f16, b: f16) -> f16 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] @@ -9568,7 +9568,7 @@ pub fn vdup_lane_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] @@ -9579,7 +9579,7 @@ pub fn vdup_lane_p64(a: poly64x1_t) -> poly64x1_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9590,7 +9590,7 @@ pub fn vdup_laneq_f64(a: float64x2_t) -> float64x1_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9601,7 +9601,7 @@ pub fn vdup_laneq_p64(a: poly64x2_t) -> poly64x1_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] @@ -9612,7 +9612,7 @@ pub fn vdupb_lane_s8(a: int8x8_t) -> i8 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] @@ -9623,7 +9623,7 @@ pub fn vduph_laneq_s16(a: int16x8_t) -> i16 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] @@ -9634,7 +9634,7 @@ pub fn vdupb_lane_u8(a: uint8x8_t) -> u8 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] @@ -9645,7 +9645,7 @@ pub fn vduph_laneq_u16(a: uint16x8_t) -> u16 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_lane_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] @@ -9656,7 +9656,7 @@ pub fn vdupb_lane_p8(a: poly8x8_t) -> p8 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] @@ -9667,7 +9667,7 @@ pub fn vduph_laneq_p16(a: poly16x8_t) -> p16 { } #[doc = "Extract an element from a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_s8)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 8))] #[rustc_legacy_const_generics(1)] @@ -9678,7 +9678,7 @@ pub fn vdupb_laneq_s8(a: int8x16_t) -> i8 { } #[doc = "Extract an element from a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 8))] #[rustc_legacy_const_generics(1)] @@ -9689,7 +9689,7 @@ pub fn vdupb_laneq_u8(a: uint8x16_t) -> u8 { } #[doc = "Extract an element from a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupb_laneq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 8))] #[rustc_legacy_const_generics(1)] @@ -9700,7 +9700,7 @@ pub fn vdupb_laneq_p8(a: poly8x16_t) -> p8 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] @@ -9711,7 +9711,7 @@ pub fn vdupd_lane_f64(a: float64x1_t) -> f64 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] #[rustc_legacy_const_generics(1)] @@ -9722,7 +9722,7 @@ pub fn vdupd_lane_s64(a: int64x1_t) -> i64 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_lane_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 0))] 
#[rustc_legacy_const_generics(1)] @@ -9733,7 +9733,7 @@ pub fn vdupd_lane_u64(a: uint64x1_t) -> u64 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -9745,7 +9745,7 @@ pub fn vduph_lane_f16(a: float16x4_t) -> f16 { } #[doc = "Extract an element from a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(nop, N = 4))] #[rustc_legacy_const_generics(1)] #[target_feature(enable = "neon,fp16")] @@ -9757,7 +9757,7 @@ pub fn vduph_laneq_f16(a: float16x8_t) -> f16 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] @@ -9768,7 +9768,7 @@ pub fn vdupq_lane_f64(a: float64x1_t) -> float64x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup, N = 0))] #[rustc_legacy_const_generics(1)] @@ -9779,7 +9779,7 @@ pub fn vdupq_lane_p64(a: poly64x1_t) -> poly64x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9790,7 +9790,7 @@ pub fn 
vdupq_laneq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(dup, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9801,7 +9801,7 @@ pub fn vdupq_laneq_p64(a: poly64x2_t) -> poly64x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9812,7 +9812,7 @@ pub fn vdups_lane_f32(a: float32x2_t) -> f32 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9823,7 +9823,7 @@ pub fn vdupd_laneq_f64(a: float64x2_t) -> f64 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9834,7 +9834,7 @@ pub fn vdups_lane_s32(a: int32x2_t) -> i32 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9845,7 +9845,7 @@ pub fn vdupd_laneq_s64(a: int64x2_t) -> i64 { } #[doc = "Set all vector lanes to 
the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9856,7 +9856,7 @@ pub fn vdups_lane_u32(a: uint32x2_t) -> u32 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupd_laneq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 1))] #[rustc_legacy_const_generics(1)] @@ -9867,7 +9867,7 @@ pub fn vdupd_laneq_u64(a: uint64x2_t) -> u64 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9878,7 +9878,7 @@ pub fn vdups_laneq_f32(a: float32x4_t) -> f32 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9889,7 +9889,7 @@ pub fn vduph_lane_s16(a: int16x4_t) -> i16 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9900,7 +9900,7 @@ pub fn vdups_laneq_s32(a: int32x4_t) -> i32 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9911,7 +9911,7 @@ pub fn vduph_lane_u16(a: uint16x4_t) -> u16 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdups_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9922,7 +9922,7 @@ pub fn vdups_laneq_u32(a: uint32x4_t) -> u32 { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vduph_lane_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, N = 2))] #[rustc_legacy_const_generics(1)] @@ -9933,7 +9933,7 @@ pub fn vduph_lane_p16(a: poly16x4_t) -> p16 { } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] @@ -9949,7 +9949,7 @@ pub fn veor3q_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] @@ -9965,7 +9965,7 @@ pub fn veor3q_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] @@ -9981,7 +9981,7 @@ pub fn veor3q_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] @@ -9997,7 +9997,7 @@ pub fn veor3q_s64(a: int64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] @@ -10013,7 +10013,7 @@ pub fn veor3q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] @@ -10029,7 +10029,7 @@ pub fn veor3q_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] @@ -10045,7 +10045,7 @@ pub fn veor3q_u32(a: uint32x4_t, b: uint32x4_t, 
c: uint32x4_t) -> uint32x4_t { } #[doc = "Three-way exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor3q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] #[cfg_attr(test, assert_instr(eor3))] @@ -10061,7 +10061,7 @@ pub fn veor3q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] @@ -10078,7 +10078,7 @@ pub fn vextq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ext, N = 1))] #[rustc_legacy_const_generics(2)] @@ -10095,7 +10095,7 @@ pub fn vextq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { } #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmadd))] @@ -10104,7 +10104,7 @@ pub fn vfma_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = 
"neon,fp16")] @@ -10120,7 +10120,7 @@ pub fn vfma_lane_f16( } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -10136,7 +10136,7 @@ pub fn vfma_laneq_f16( } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -10152,7 +10152,7 @@ pub fn vfmaq_lane_f16( } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -10168,7 +10168,7 @@ pub fn vfmaq_laneq_f16( } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10183,7 +10183,7 @@ pub fn vfma_lane_f32( } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10198,7 +10198,7 @@ pub fn vfma_laneq_f32( } #[doc = "Floating-point fused multiply-add to 
accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10213,7 +10213,7 @@ pub fn vfmaq_lane_f32( } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10228,7 +10228,7 @@ pub fn vfmaq_laneq_f32( } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10243,7 +10243,7 @@ pub fn vfmaq_laneq_f64( } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10258,7 +10258,7 @@ pub fn vfma_lane_f64( } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10273,7 +10273,7 @@ pub fn vfma_laneq_f64( } #[doc = "Floating-point fused Multiply-Subtract from accumulator."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -10283,7 +10283,7 @@ pub fn vfma_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t { } #[doc = "Floating-point fused Multiply-Subtract from accumulator."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -10293,7 +10293,7 @@ pub fn vfmaq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t { } #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmadd))] @@ -10302,7 +10302,7 @@ pub fn vfma_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10316,7 +10316,7 @@ pub fn vfmad_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmadd))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", 
issue = "136306")] @@ -10326,7 +10326,7 @@ pub fn vfmah_f16(a: f16, b: f16, c: f16) -> f16 { } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -10341,7 +10341,7 @@ pub fn vfmah_lane_f16(a: f16, b: f16, v: float16x4_t) -> f16 { } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmah_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -10356,7 +10356,7 @@ pub fn vfmah_laneq_f16(a: f16, b: f16, v: float16x8_t) -> f16 { } #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmla))] @@ -10365,7 +10365,7 @@ pub fn vfmaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmla, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10380,7 +10380,7 @@ pub fn vfmaq_lane_f64( } #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmla))] @@ -10389,7 +10389,7 @@ pub fn vfmaq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10403,7 +10403,7 @@ pub fn vfmas_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmas_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10417,7 +10417,7 @@ pub fn vfmas_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { } #[doc = "Floating-point fused multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmad_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmadd, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10431,7 +10431,7 @@ pub fn vfmad_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { } #[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_high_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -10449,7 +10449,7 @@ pub fn vfmlal_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float3 } #[doc = "Floating-point fused Multiply-Add Long 
to accumulator (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_high_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -10467,7 +10467,7 @@ pub fn vfmlalq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float } #[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_high_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlal2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10484,7 +10484,7 @@ pub fn vfmlal_lane_high_f16( } #[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_high_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlal2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10501,7 +10501,7 @@ pub fn vfmlal_laneq_high_f16( } #[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_high_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlal2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10518,7 +10518,7 @@ pub fn vfmlalq_lane_high_f16( } #[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_high_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlal2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10535,7 +10535,7 @@ pub fn vfmlalq_laneq_high_f16( } #[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_lane_low_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlal, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10552,7 +10552,7 @@ pub fn vfmlal_lane_low_f16( } #[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_laneq_low_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlal, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10569,7 +10569,7 @@ pub fn vfmlal_laneq_low_f16( } #[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_lane_low_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlal, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10586,7 +10586,7 @@ pub fn vfmlalq_lane_low_f16( } #[doc = "Floating-point fused Multiply-Add Long to accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_laneq_low_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlal, LANE = 0))] #[target_feature(enable = 
"neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10603,7 +10603,7 @@ pub fn vfmlalq_laneq_low_f16( } #[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlal_low_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -10621,7 +10621,7 @@ pub fn vfmlal_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32 } #[doc = "Floating-point fused Multiply-Add Long to accumulator (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlalq_low_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -10639,7 +10639,7 @@ pub fn vfmlalq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float3 } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_high_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -10657,7 +10657,7 @@ pub fn vfmlsl_high_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float3 } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_high_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] 
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -10675,7 +10675,7 @@ pub fn vfmlslq_high_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_high_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10692,7 +10692,7 @@ pub fn vfmlsl_lane_high_f16( } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_high_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10709,7 +10709,7 @@ pub fn vfmlsl_laneq_high_f16( } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_high_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10726,7 +10726,7 @@ pub fn vfmlslq_lane_high_f16( } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_high_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlsl2, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10743,7 +10743,7 @@ pub fn vfmlslq_laneq_high_f16( } #[doc = 
"Floating-point fused Multiply-Subtract Long from accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_lane_low_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlsl, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10760,7 +10760,7 @@ pub fn vfmlsl_lane_low_f16( } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_laneq_low_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlsl, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10777,7 +10777,7 @@ pub fn vfmlsl_laneq_low_f16( } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_lane_low_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlsl, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10794,7 +10794,7 @@ pub fn vfmlslq_lane_low_f16( } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (by element)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_laneq_low_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmlsl, LANE = 0))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] @@ -10811,7 +10811,7 @@ pub fn vfmlslq_laneq_low_f16( } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlsl_low_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -10829,7 +10829,7 @@ pub fn vfmlsl_low_f16(r: float32x2_t, a: float16x4_t, b: float16x4_t) -> float32 } #[doc = "Floating-point fused Multiply-Subtract Long from accumulator (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmlslq_low_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(not(target_arch = "arm"), target_feature(enable = "fhm"))] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -10847,7 +10847,7 @@ pub fn vfmlslq_low_f16(r: float32x4_t, a: float16x8_t, b: float16x8_t) -> float3 } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -10859,7 +10859,7 @@ pub fn vfms_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -10875,7 +10875,7 @@ pub fn vfms_lane_f16( } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmls, LANE = 
0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -10891,7 +10891,7 @@ pub fn vfms_laneq_f16( } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -10907,7 +10907,7 @@ pub fn vfmsq_lane_f16( } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -10923,7 +10923,7 @@ pub fn vfmsq_laneq_f16( } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10938,7 +10938,7 @@ pub fn vfms_lane_f32( } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10953,7 +10953,7 @@ pub fn vfms_laneq_f32( } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10968,7 
+10968,7 @@ pub fn vfmsq_lane_f32( } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10983,7 +10983,7 @@ pub fn vfmsq_laneq_f32( } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -10998,7 +10998,7 @@ pub fn vfmsq_laneq_f64( } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -11013,7 +11013,7 @@ pub fn vfms_lane_f64( } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -11028,7 +11028,7 @@ pub fn vfms_laneq_f64( } #[doc = "Floating-point fused Multiply-Subtract from accumulator."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -11038,7 +11038,7 @@ pub fn vfms_n_f16(a: float16x4_t, b: float16x4_t, c: f16) -> float16x4_t { 
} #[doc = "Floating-point fused Multiply-Subtract from accumulator."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -11048,7 +11048,7 @@ pub fn vfmsq_n_f16(a: float16x8_t, b: float16x8_t, c: f16) -> float16x8_t { } #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11057,7 +11057,7 @@ pub fn vfms_n_f64(a: float64x1_t, b: float64x1_t, c: f64) -> float64x1_t { } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmsub))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -11067,7 +11067,7 @@ pub fn vfmsh_f16(a: f16, b: f16, c: f16) -> f16 { } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -11082,7 +11082,7 @@ pub fn vfmsh_lane_f16(a: f16, b: f16, v: float16x4_t) -> f16 { } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsh_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, 
assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] #[target_feature(enable = "neon,fp16")] @@ -11097,7 +11097,7 @@ pub fn vfmsh_laneq_f16(a: f16, b: f16, v: float16x8_t) -> f16 { } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11109,7 +11109,7 @@ pub fn vfmsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -11124,7 +11124,7 @@ pub fn vfmsq_lane_f64( } #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmls))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11133,7 +11133,7 @@ pub fn vfmsq_n_f64(a: float64x2_t, b: float64x2_t, c: f64) -> float64x2_t { } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -11143,7 +11143,7 @@ pub fn vfmss_lane_f32(a: f32, b: f32, c: float32x2_t) -> f32 { } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmss_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -11153,7 +11153,7 @@ pub fn vfmss_laneq_f32(a: f32, b: f32, c: float32x4_t) -> f32 { } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -11163,7 +11163,7 @@ pub fn vfmsd_lane_f64(a: f64, b: f64, c: float64x1_t) -> f64 { } #[doc = "Floating-point fused multiply-subtract to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsd_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmsub, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -11175,7 +11175,7 @@ pub fn vfmsd_laneq_f64(a: f64, b: f64, c: float64x2_t) -> f64 { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(test, assert_instr(ldr))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -11187,7 +11187,7 @@ pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(test, assert_instr(ldr))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -11199,7 +11199,7 @@ pub unsafe fn vld1q_f16(ptr: *const f16) -> 
float16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11210,7 +11210,7 @@ pub unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11221,7 +11221,7 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11232,7 +11232,7 @@ pub unsafe fn vld1_f64(ptr: *const f64) -> float64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11243,7 +11243,7 @@ pub unsafe fn vld1q_f64(ptr: *const f64) -> float64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11254,7 +11254,7 @@ 
pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11265,7 +11265,7 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11276,7 +11276,7 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11287,7 +11287,7 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11298,7 +11298,7 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] @@ -11309,7 +11309,7 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11320,7 +11320,7 @@ pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11331,7 +11331,7 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11342,7 +11342,7 @@ pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11353,7 +11353,7 @@ pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11364,7 +11364,7 @@ pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11375,7 +11375,7 @@ pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11386,7 +11386,7 @@ pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11397,7 +11397,7 @@ pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11408,7 +11408,7 @@ pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11419,7 +11419,7 @@ pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11430,7 +11430,7 @@ pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11441,7 +11441,7 @@ pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11452,7 +11452,7 @@ pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11463,7 +11463,7 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11474,7 +11474,7 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ldr))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11485,7 +11485,7 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld))] @@ -11496,7 +11496,7 @@ pub unsafe fn vld1_f64_x2(ptr: *const f64) -> float64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld))] @@ -11507,7 +11507,7 @@ pub unsafe fn vld1_f64_x3(ptr: *const f64) -> float64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld))] @@ -11518,7 +11518,7 @@ pub unsafe fn vld1_f64_x4(ptr: *const f64) -> float64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x2)"] 
#[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld))] @@ -11529,7 +11529,7 @@ pub unsafe fn vld1q_f64_x2(ptr: *const f64) -> float64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld))] @@ -11540,7 +11540,7 @@ pub unsafe fn vld1q_f64_x3(ptr: *const f64) -> float64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld))] @@ -11551,7 +11551,7 @@ pub unsafe fn vld1q_f64_x4(ptr: *const f64) -> float64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] @@ -11569,7 +11569,7 @@ pub unsafe fn vld2_dup_f64(a: *const f64) -> float64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] @@ -11587,7 +11587,7 @@ pub unsafe fn vld2q_dup_f64(a: *const f64) -> float64x2x2_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2r))] @@ -11605,7 +11605,7 @@ pub unsafe fn vld2q_dup_s64(a: *const i64) -> int64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -11616,7 +11616,7 @@ pub unsafe fn vld2_f64(a: *const f64) -> float64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11636,7 +11636,7 @@ pub unsafe fn vld2_lane_f64(a: *const f64, b: float64x1x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11656,7 +11656,7 @@ pub unsafe fn vld2_lane_s64(a: *const i64, b: int64x1x2_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11669,7 +11669,7 @@ pub unsafe fn 
vld2_lane_p64(a: *const p64, b: poly64x1x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11682,7 +11682,7 @@ pub unsafe fn vld2_lane_u64(a: *const u64, b: uint64x1x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11694,7 +11694,7 @@ pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11709,7 +11709,7 @@ pub unsafe fn vld2q_dup_p64(a: *const p64) -> poly64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11721,7 +11721,7 @@ pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] @@ -11736,7 +11736,7 @@ pub unsafe fn vld2q_dup_u64(a: *const u64) -> uint64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] @@ -11754,7 +11754,7 @@ pub unsafe fn vld2q_f64(a: *const f64) -> float64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] @@ -11772,7 +11772,7 @@ pub unsafe fn vld2q_s64(a: *const i64) -> int64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11793,7 +11793,7 @@ pub unsafe fn vld2q_lane_f64(a: *const f64, b: float64x2x2_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11813,7 +11813,7 @@ pub unsafe fn vld2q_lane_s8(a: *const i8, b: int8x16x2_t) -> in #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11833,7 +11833,7 @@ pub unsafe fn vld2q_lane_s64(a: *const i64, b: int64x2x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11846,7 +11846,7 @@ pub unsafe fn vld2q_lane_p64(a: *const p64, b: poly64x2x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11859,7 +11859,7 @@ pub unsafe fn vld2q_lane_u8(a: *const u8, b: uint8x16x2_t) -> u #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11872,7 +11872,7 @@ pub unsafe fn vld2q_lane_u64(a: *const u64, b: uint64x2x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -11885,7 +11885,7 @@ pub unsafe fn vld2q_lane_p8(a: *const p8, b: poly8x16x2_t) -> p #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] 
+#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11897,7 +11897,7 @@ pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -11912,7 +11912,7 @@ pub unsafe fn vld2q_p64(a: *const p64) -> poly64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld2))] @@ -11923,7 +11923,7 @@ pub unsafe fn vld2q_u64(a: *const u64) -> uint64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] @@ -11941,7 +11941,7 @@ pub unsafe fn vld3_dup_f64(a: *const f64) -> float64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] @@ -11959,7 +11959,7 @@ pub unsafe fn vld3q_dup_f64(a: *const f64) -> float64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s64)"] #[doc = "## 
Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3r))] @@ -11977,7 +11977,7 @@ pub unsafe fn vld3q_dup_s64(a: *const i64) -> int64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -11988,7 +11988,7 @@ pub unsafe fn vld3_f64(a: *const f64) -> float64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12014,7 +12014,7 @@ pub unsafe fn vld3_lane_f64(a: *const f64, b: float64x1x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12027,7 +12027,7 @@ pub unsafe fn vld3_lane_p64(a: *const p64, b: poly64x1x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12053,7 +12053,7 @@ pub unsafe fn vld3_lane_s64(a: *const i64, b: int64x1x3_t) -> i #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12066,7 +12066,7 @@ pub unsafe fn vld3_lane_u64(a: *const u64, b: uint64x1x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -12078,7 +12078,7 @@ pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -12094,7 +12094,7 @@ pub unsafe fn vld3q_dup_p64(a: *const p64) -> poly64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -12106,7 +12106,7 @@ pub unsafe fn vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -12122,7 +12122,7 @@ pub unsafe fn 
vld3q_dup_u64(a: *const u64) -> uint64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] @@ -12133,7 +12133,7 @@ pub unsafe fn vld3q_f64(a: *const f64) -> float64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] @@ -12144,7 +12144,7 @@ pub unsafe fn vld3q_s64(a: *const i64) -> int64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12170,7 +12170,7 @@ pub unsafe fn vld3q_lane_f64(a: *const f64, b: float64x2x3_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12183,7 +12183,7 @@ pub unsafe fn vld3q_lane_p64(a: *const p64, b: poly64x2x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] 
#[rustc_legacy_const_generics(2)] @@ -12209,7 +12209,7 @@ pub unsafe fn vld3q_lane_s8(a: *const i8, b: int8x16x3_t) -> in #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12235,7 +12235,7 @@ pub unsafe fn vld3q_lane_s64(a: *const i64, b: int64x2x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12248,7 +12248,7 @@ pub unsafe fn vld3q_lane_u8(a: *const u8, b: uint8x16x3_t) -> u #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12261,7 +12261,7 @@ pub unsafe fn vld3q_lane_u64(a: *const u64, b: uint64x2x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12274,7 +12274,7 @@ pub unsafe fn vld3q_lane_p8(a: *const p8, b: poly8x16x3_t) -> p #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -12286,7 +12286,7 @@ pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -12302,7 +12302,7 @@ pub unsafe fn vld3q_p64(a: *const p64) -> poly64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld3))] @@ -12313,7 +12313,7 @@ pub unsafe fn vld3q_u64(a: *const u64) -> uint64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -12331,7 +12331,7 @@ pub unsafe fn vld4_dup_f64(a: *const f64) -> float64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -12349,7 +12349,7 @@ pub unsafe fn vld4q_dup_f64(a: *const f64) -> float64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic 
unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4r))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -12367,7 +12367,7 @@ pub unsafe fn vld4q_dup_s64(a: *const i64) -> int64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -12378,7 +12378,7 @@ pub unsafe fn vld4_f64(a: *const f64) -> float64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12405,7 +12405,7 @@ pub unsafe fn vld4_lane_f64(a: *const f64, b: float64x1x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12432,7 +12432,7 @@ pub unsafe fn vld4_lane_s64(a: *const i64, b: int64x1x4_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12445,7 +12445,7 @@ pub unsafe fn vld4_lane_p64(a: *const p64, b: poly64x1x4_t) -> #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12458,7 +12458,7 @@ pub unsafe fn vld4_lane_u64(a: *const u64, b: uint64x1x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld4r))] @@ -12470,7 +12470,7 @@ pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld4r))] @@ -12487,7 +12487,7 @@ pub unsafe fn vld4q_dup_p64(a: *const p64) -> poly64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4r))] @@ -12499,7 +12499,7 @@ pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4r))] @@ -12516,7 +12516,7 @@ pub unsafe fn vld4q_dup_u64(a: *const u64) -> uint64x2x4_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] @@ -12527,7 +12527,7 @@ pub unsafe fn vld4q_f64(a: *const f64) -> float64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] @@ -12538,7 +12538,7 @@ pub unsafe fn vld4q_s64(a: *const i64) -> int64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12565,7 +12565,7 @@ pub unsafe fn vld4q_lane_f64(a: *const f64, b: float64x2x4_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12592,7 +12592,7 @@ pub unsafe fn vld4q_lane_s8(a: *const i8, b: int8x16x4_t) -> in #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12619,7 +12619,7 @@ pub unsafe fn vld4q_lane_s64(a: *const 
i64, b: int64x2x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12632,7 +12632,7 @@ pub unsafe fn vld4q_lane_p64(a: *const p64, b: poly64x2x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12645,7 +12645,7 @@ pub unsafe fn vld4q_lane_u8(a: *const u8, b: uint8x16x4_t) -> u #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12658,7 +12658,7 @@ pub unsafe fn vld4q_lane_u64(a: *const u64, b: uint64x2x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12671,7 +12671,7 @@ pub unsafe fn vld4q_lane_p8(a: *const p8, b: poly8x16x4_t) -> p #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[target_feature(enable = "neon,aes")] @@ -12683,7 
+12683,7 @@ pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[target_feature(enable = "neon,aes")] @@ -12700,7 +12700,7 @@ pub unsafe fn vld4q_p64(a: *const p64) -> poly64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ld4))] @@ -12711,7 +12711,7 @@ pub unsafe fn vld4q_u64(a: *const u64) -> uint64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12730,7 +12730,7 @@ pub unsafe fn vldap1_lane_s64(ptr: *const i64, src: int64x1_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -12749,7 +12749,7 @@ pub unsafe fn vldap1q_lane_s64(ptr: *const i64, src: int64x2_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))] @@ -12763,7 +12763,7 @@ pub unsafe fn vldap1q_lane_f64(ptr: *const f64, src: float64x2_ #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))] @@ -12777,7 +12777,7 @@ pub unsafe fn vldap1_lane_u64(ptr: *const u64, src: uint64x1_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))] @@ -12791,7 +12791,7 @@ pub unsafe fn vldap1q_lane_u64(ptr: *const u64, src: uint64x2_t #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))] @@ -12805,7 +12805,7 @@ pub unsafe fn vldap1_lane_p64(ptr: *const p64, src: poly64x1_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldap1q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(ldap1, LANE = 0))] @@ -12819,7 +12819,7 @@ pub unsafe fn 
vldap1q_lane_p64(ptr: *const p64, src: poly64x2_t #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12832,7 +12832,7 @@ pub unsafe fn vluti2_lane_f16(a: float16x4_t, b: uint8x8_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12845,7 +12845,7 @@ pub unsafe fn vluti2q_lane_f16(a: float16x8_t, b: uint8x8_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12858,7 +12858,7 @@ pub unsafe fn vluti2_lane_u8(a: uint8x8_t, b: uint8x8_t) -> ui #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12871,7 +12871,7 @@ pub unsafe fn vluti2q_lane_u8(a: uint8x16_t, b: uint8x8_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12884,7 +12884,7 @@ pub unsafe fn vluti2_lane_u16(a: uint16x4_t, b: uint8x8_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12897,7 +12897,7 @@ pub unsafe fn vluti2q_lane_u16(a: uint16x8_t, b: uint8x8_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12910,7 +12910,7 @@ pub unsafe fn vluti2_lane_p8(a: poly8x8_t, b: uint8x8_t) -> po #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12923,7 +12923,7 @@ pub unsafe fn vluti2q_lane_p8(a: poly8x16_t, b: uint8x8_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12936,7 +12936,7 @@ pub unsafe fn 
vluti2_lane_p16(a: poly16x4_t, b: uint8x8_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12949,7 +12949,7 @@ pub unsafe fn vluti2q_lane_p16(a: poly16x8_t, b: uint8x8_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12969,7 +12969,7 @@ pub unsafe fn vluti2_lane_s8(a: int8x8_t, b: uint8x8_t) -> int8 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -12989,7 +12989,7 @@ pub unsafe fn vluti2q_lane_s8(a: int8x16_t, b: uint8x8_t) -> in #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13009,7 +13009,7 @@ pub unsafe fn vluti2_lane_s16(a: int16x4_t, b: uint8x8_t) -> in #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13029,7 +13029,7 @@ pub unsafe fn vluti2q_lane_s16(a: int16x8_t, b: uint8x8_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13042,7 +13042,7 @@ pub unsafe fn vluti2_laneq_f16(a: float16x4_t, b: uint8x16_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13055,7 +13055,7 @@ pub unsafe fn vluti2q_laneq_f16(a: float16x8_t, b: uint8x16_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13068,7 +13068,7 @@ pub unsafe fn vluti2_laneq_u8(a: uint8x8_t, b: uint8x16_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13081,7 +13081,7 @@ pub unsafe fn 
vluti2q_laneq_u8(a: uint8x16_t, b: uint8x16_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13094,7 +13094,7 @@ pub unsafe fn vluti2_laneq_u16(a: uint16x4_t, b: uint8x16_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13107,7 +13107,7 @@ pub unsafe fn vluti2q_laneq_u16(a: uint16x8_t, b: uint8x16_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13120,7 +13120,7 @@ pub unsafe fn vluti2_laneq_p8(a: poly8x8_t, b: uint8x16_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13133,7 +13133,7 @@ pub unsafe fn vluti2q_laneq_p8(a: poly8x16_t, b: uint8x16_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic 
unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13146,7 +13146,7 @@ pub unsafe fn vluti2_laneq_p16(a: poly16x4_t, b: uint8x16_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13159,7 +13159,7 @@ pub unsafe fn vluti2q_laneq_p16(a: poly16x8_t, b: uint8x16_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13179,7 +13179,7 @@ pub unsafe fn vluti2_laneq_s8(a: int8x8_t, b: uint8x16_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13199,7 +13199,7 @@ pub unsafe fn vluti2q_laneq_s8(a: int8x16_t, b: uint8x16_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13219,7 +13219,7 @@ pub 
unsafe fn vluti2_laneq_s16(a: int16x4_t, b: uint8x16_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti2q_laneq_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, INDEX = 1))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13239,7 +13239,7 @@ pub unsafe fn vluti2q_laneq_s16(a: int16x8_t, b: uint8x16_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_f16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut,fp16")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13252,7 +13252,7 @@ pub unsafe fn vluti4q_lane_f16_x2(a: float16x8x2_t, b: uint8x8_ #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13265,7 +13265,7 @@ pub unsafe fn vluti4q_lane_u16_x2(a: uint16x8x2_t, b: uint8x8_t #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13278,7 +13278,7 @@ pub unsafe fn vluti4q_lane_p16_x2(a: poly16x8x2_t, b: uint8x8_t #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s16_x2)"] #[doc = "## Safety"] 
#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13298,7 +13298,7 @@ pub unsafe fn vluti4q_lane_s16_x2(a: int16x8x2_t, b: uint8x8_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13318,7 +13318,7 @@ pub unsafe fn vluti4q_lane_s8(a: int8x16_t, b: uint8x8_t) -> in #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13331,7 +13331,7 @@ pub unsafe fn vluti4q_lane_u8(a: uint8x16_t, b: uint8x8_t) -> u #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13344,7 +13344,7 @@ pub unsafe fn vluti4q_lane_p8(a: poly8x16_t, b: uint8x8_t) -> p #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_f16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut,fp16")] #[cfg_attr(test, assert_instr(nop, LANE = 3))] #[unstable(feature = "stdarch_neon_feat_lut", issue = 
"138050")] @@ -13360,7 +13360,7 @@ pub unsafe fn vluti4q_laneq_f16_x2( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 3))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13373,7 +13373,7 @@ pub unsafe fn vluti4q_laneq_u16_x2(a: uint16x8x2_t, b: uint8x16 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 3))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13386,7 +13386,7 @@ pub unsafe fn vluti4q_laneq_p16_x2(a: poly16x8x2_t, b: uint8x16 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 3))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13406,7 +13406,7 @@ pub unsafe fn vluti4q_laneq_s16_x2(a: int16x8x2_t, b: uint8x16_ #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13426,7 +13426,7 @@ pub unsafe fn vluti4q_laneq_s8(a: int8x16_t, b: uint8x16_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_u8)"] #[doc = "## 
Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13439,7 +13439,7 @@ pub unsafe fn vluti4q_laneq_u8(a: uint8x16_t, b: uint8x16_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vluti4q_laneq_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,lut")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[unstable(feature = "stdarch_neon_feat_lut", issue = "138050")] @@ -13450,7 +13450,7 @@ pub unsafe fn vluti4q_laneq_p8(a: poly8x16_t, b: uint8x16_t) -> } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmax))] @@ -13466,7 +13466,7 @@ pub fn vmax_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmax))] @@ -13482,7 +13482,7 @@ pub fn vmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -13499,7 +13499,7 @@ pub fn vmaxh_f16(a: f16, b: f16) -> f16 { } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -13515,7 +13515,7 @@ pub fn vmaxnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -13531,7 +13531,7 @@ pub fn vmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point Maximum Number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -13548,7 +13548,7 @@ pub fn vmaxnmh_f16(a: f16, b: f16) -> f16 { } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -13565,7 +13565,7 @@ pub fn vmaxnmv_f16(a: float16x4_t) -> f16 { } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -13582,7 +13582,7 @@ pub fn vmaxnmvq_f16(a: float16x8_t) -> f16 { } #[doc = 
"Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmv_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] @@ -13598,7 +13598,7 @@ pub fn vmaxnmv_f32(a: float32x2_t) -> f32 { } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmp))] @@ -13614,7 +13614,7 @@ pub fn vmaxnmvq_f64(a: float64x2_t) -> f64 { } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmvq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxnmv))] @@ -13630,7 +13630,7 @@ pub fn vmaxnmvq_f32(a: float32x4_t) -> f32 { } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -13647,7 +13647,7 @@ pub fn vmaxv_f16(a: float16x4_t) -> f16 { } #[doc = "Floating-point maximum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -13664,7 +13664,7 @@ pub fn vmaxvq_f16(a: float16x8_t) -> 
f16 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] @@ -13680,7 +13680,7 @@ pub fn vmaxv_f32(a: float32x2_t) -> f32 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxv))] @@ -13696,7 +13696,7 @@ pub fn vmaxvq_f32(a: float32x4_t) -> f32 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] @@ -13712,7 +13712,7 @@ pub fn vmaxvq_f64(a: float64x2_t) -> f64 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] @@ -13721,7 +13721,7 @@ pub fn vmaxv_s8(a: int8x8_t) -> i8 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] @@ -13730,7 +13730,7 @@ pub fn vmaxvq_s8(a: int8x16_t) -> i8 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] @@ -13739,7 +13739,7 @@ pub fn vmaxv_s16(a: int16x4_t) -> i16 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] @@ -13748,7 +13748,7 @@ pub fn vmaxvq_s16(a: int16x8_t) -> i16 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] @@ -13757,7 +13757,7 @@ pub fn vmaxv_s32(a: int32x2_t) -> i32 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxv))] @@ -13766,7 +13766,7 @@ pub fn vmaxvq_s32(a: int32x4_t) -> i32 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] @@ -13775,7 +13775,7 @@ pub fn vmaxv_u8(a: uint8x8_t) -> u8 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u8)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] @@ -13784,7 +13784,7 @@ pub fn vmaxvq_u8(a: uint8x16_t) -> u8 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] @@ -13793,7 +13793,7 @@ pub fn vmaxv_u16(a: uint16x4_t) -> u16 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] @@ -13802,7 +13802,7 @@ pub fn vmaxvq_u16(a: uint16x8_t) -> u16 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxv_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] @@ -13811,7 +13811,7 @@ pub fn vmaxv_u32(a: uint32x2_t) -> u32 { } #[doc = "Horizontal vector max."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxvq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxv))] @@ -13820,7 +13820,7 @@ pub fn vmaxvq_u32(a: uint32x4_t) -> u32 { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmin))] @@ 
-13836,7 +13836,7 @@ pub fn vmin_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmin))] @@ -13852,7 +13852,7 @@ pub fn vminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -13869,7 +13869,7 @@ pub fn vminh_f16(a: f16, b: f16) -> f16 { } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminnm))] @@ -13885,7 +13885,7 @@ pub fn vminnm_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminnm))] @@ -13901,7 +13901,7 @@ pub fn vminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point Minimum Number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch 
= "arm64ec"))] @@ -13918,7 +13918,7 @@ pub fn vminnmh_f16(a: f16, b: f16) -> f16 { } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -13935,7 +13935,7 @@ pub fn vminnmv_f16(a: float16x4_t) -> f16 { } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -13952,7 +13952,7 @@ pub fn vminnmvq_f16(a: float16x8_t) -> f16 { } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmv_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -13968,7 +13968,7 @@ pub fn vminnmv_f32(a: float32x2_t) -> f32 { } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -13984,7 +13984,7 @@ pub fn vminnmvq_f64(a: float64x2_t) -> f64 { } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmvq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmv))] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] @@ -14000,7 +14000,7 @@ pub fn vminnmvq_f32(a: float32x4_t) -> f32 { } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -14017,7 +14017,7 @@ pub fn vminv_f16(a: float16x4_t) -> f16 { } #[doc = "Floating-point minimum number across vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -14034,7 +14034,7 @@ pub fn vminvq_f16(a: float16x8_t) -> f16 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] @@ -14050,7 +14050,7 @@ pub fn vminv_f32(a: float32x2_t) -> f32 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminv))] @@ -14066,7 +14066,7 @@ pub fn vminvq_f32(a: float32x4_t) -> f32 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] @@ -14082,7 +14082,7 @@ 
pub fn vminvq_f64(a: float64x2_t) -> f64 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] @@ -14091,7 +14091,7 @@ pub fn vminv_s8(a: int8x8_t) -> i8 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] @@ -14100,7 +14100,7 @@ pub fn vminvq_s8(a: int8x16_t) -> i8 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] @@ -14109,7 +14109,7 @@ pub fn vminv_s16(a: int16x4_t) -> i16 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] @@ -14118,7 +14118,7 @@ pub fn vminvq_s16(a: int16x8_t) -> i16 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] @@ -14127,7 +14127,7 @@ pub fn vminv_s32(a: int32x2_t) -> i32 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminv))] @@ -14136,7 +14136,7 @@ pub fn vminvq_s32(a: int32x4_t) -> i32 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] @@ -14145,7 +14145,7 @@ pub fn vminv_u8(a: uint8x8_t) -> u8 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] @@ -14154,7 +14154,7 @@ pub fn vminvq_u8(a: uint8x16_t) -> u8 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] @@ -14163,7 +14163,7 @@ pub fn vminv_u16(a: uint16x4_t) -> u16 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] @@ -14172,7 +14172,7 @@ pub fn vminvq_u16(a: uint16x8_t) -> u16 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminv_u32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] @@ -14181,7 +14181,7 @@ pub fn vminv_u32(a: uint32x2_t) -> u32 { } #[doc = "Horizontal vector min."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminvq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminv))] @@ -14190,7 +14190,7 @@ pub fn vminvq_u32(a: uint32x4_t) -> u32 { } #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14199,7 +14199,7 @@ pub fn vmla_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { } #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14208,7 +14208,7 @@ pub fn vmlaq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14219,7 +14219,7 @@ pub fn vmlal_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16 } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s16)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14234,7 +14234,7 @@ pub fn vmlal_high_laneq_s16( } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14245,7 +14245,7 @@ pub fn vmlal_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32 } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14260,7 +14260,7 @@ pub fn vmlal_high_laneq_s32( } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14275,7 +14275,7 @@ pub fn vmlal_high_lane_u16( } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14290,7 +14290,7 @@ pub fn vmlal_high_laneq_u16( } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14305,7 +14305,7 @@ pub fn vmlal_high_lane_u32( } 
#[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14320,7 +14320,7 @@ pub fn vmlal_high_laneq_u32( } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14329,7 +14329,7 @@ pub fn vmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14338,7 +14338,7 @@ pub fn vmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14347,7 +14347,7 @@ pub fn vmlal_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { } #[doc = "Multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14356,7 +14356,7 @@ pub fn vmlal_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> 
uint64x2_t { } #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14369,7 +14369,7 @@ pub fn vmlal_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { } #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14382,7 +14382,7 @@ pub fn vmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { } #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14395,7 +14395,7 @@ pub fn vmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { } #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14408,7 +14408,7 @@ pub fn vmlal_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t } #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since 
= "1.59.0")] @@ -14421,7 +14421,7 @@ pub fn vmlal_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t } #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14434,7 +14434,7 @@ pub fn vmlal_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t } #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14443,7 +14443,7 @@ pub fn vmls_f64(a: float64x1_t, b: float64x1_t, c: float64x1_t) -> float64x1_t { } #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14452,7 +14452,7 @@ pub fn vmlsq_f64(a: float64x2_t, b: float64x2_t, c: float64x2_t) -> float64x2_t } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14463,7 +14463,7 @@ pub fn vmlsl_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16 } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s16)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14478,7 +14478,7 @@ pub fn vmlsl_high_laneq_s16( } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14489,7 +14489,7 @@ pub fn vmlsl_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32 } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14504,7 +14504,7 @@ pub fn vmlsl_high_laneq_s32( } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14519,7 +14519,7 @@ pub fn vmlsl_high_lane_u16( } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14534,7 +14534,7 @@ pub fn vmlsl_high_laneq_u16( } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14549,7 +14549,7 @@ pub fn 
vmlsl_high_lane_u32( } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -14564,7 +14564,7 @@ pub fn vmlsl_high_laneq_u32( } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14573,7 +14573,7 @@ pub fn vmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14582,7 +14582,7 @@ pub fn vmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14591,7 +14591,7 @@ pub fn vmlsl_high_n_u16(a: uint32x4_t, b: uint16x8_t, c: u16) -> uint32x4_t { } #[doc = "Multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14600,7 +14600,7 @@ pub fn 
vmlsl_high_n_u32(a: uint64x2_t, b: uint32x4_t, c: u32) -> uint64x2_t { } #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14613,7 +14613,7 @@ pub fn vmlsl_high_s8(a: int16x8_t, b: int8x16_t, c: int8x16_t) -> int16x8_t { } #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14626,7 +14626,7 @@ pub fn vmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { } #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14639,7 +14639,7 @@ pub fn vmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { } #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14652,7 +14652,7 @@ pub fn vmlsl_high_u8(a: uint16x8_t, b: uint8x16_t, c: uint8x16_t) -> uint16x8_t } #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14665,7 +14665,7 @@ pub fn vmlsl_high_u16(a: uint32x4_t, b: uint16x8_t, c: uint16x8_t) -> uint32x4_t } #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14678,7 +14678,7 @@ pub fn vmlsl_high_u32(a: uint64x2_t, b: uint32x4_t, c: uint32x4_t) -> uint64x2_t } #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sxtl2))] @@ -14690,7 +14690,7 @@ pub fn vmovl_high_s8(a: int8x16_t) -> int16x8_t { } #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sxtl2))] @@ -14702,7 +14702,7 @@ pub fn vmovl_high_s16(a: int16x8_t) -> int32x4_t { } #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sxtl2))] @@ -14714,7 +14714,7 @@ pub fn vmovl_high_s32(a: int32x4_t) -> int64x2_t { } #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uxtl2))] @@ -14726,7 +14726,7 @@ pub fn vmovl_high_u8(a: uint8x16_t) -> uint16x8_t { } #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uxtl2))] @@ -14738,7 +14738,7 @@ pub fn vmovl_high_u16(a: uint16x8_t) -> uint32x4_t { } #[doc = "Vector move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uxtl2))] @@ -14750,7 +14750,7 @@ pub fn vmovl_high_u32(a: uint32x4_t) -> uint64x2_t { } #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(xtn2))] @@ -14762,7 +14762,7 @@ pub fn vmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { } #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(xtn2))] @@ -14774,7 +14774,7 @@ pub fn vmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { } #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, 
assert_instr(xtn2))] @@ -14786,7 +14786,7 @@ pub fn vmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { } #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(xtn2))] @@ -14798,7 +14798,7 @@ pub fn vmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { } #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(xtn2))] @@ -14810,7 +14810,7 @@ pub fn vmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { } #[doc = "Extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_high_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(xtn2))] @@ -14822,7 +14822,7 @@ pub fn vmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmul))] @@ -14831,7 +14831,7 @@ pub fn vmul_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmul))] @@ -14840,7 +14840,7 @@ 
pub fn vmulq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -14851,7 +14851,7 @@ pub fn vmul_lane_f64(a: float64x1_t, b: float64x1_t) -> float64 } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -14863,7 +14863,7 @@ pub fn vmul_laneq_f16(a: float16x4_t, b: float16x8_t) -> float1 } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -14875,7 +14875,7 @@ pub fn vmulq_laneq_f16(a: float16x8_t, b: float16x8_t) -> float } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -14886,7 +14886,7 @@ pub fn vmul_laneq_f64(a: float64x1_t, b: float64x2_t) -> float6 } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14895,7 +14895,7 @@ pub fn vmul_n_f64(a: 
float64x1_t, b: f64) -> float64x1_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -14904,7 +14904,7 @@ pub fn vmulq_n_f64(a: float64x2_t, b: f64) -> float64x2_t { } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -14918,7 +14918,7 @@ pub fn vmuld_lane_f64(a: f64, b: float64x1_t) -> f64 { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -14928,7 +14928,7 @@ pub fn vmulh_f16(a: f16, b: f16) -> f16 { } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -14943,7 +14943,7 @@ pub fn vmulh_lane_f16(a: f16, b: float16x4_t) -> f16 { } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulh_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -14958,7 +14958,7 @@ pub fn vmulh_laneq_f16(a: f16, b: float16x8_t) -> f16 { } #[doc = "Multiply long"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -14969,7 +14969,7 @@ pub fn vmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32 } #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -14980,7 +14980,7 @@ pub fn vmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int3 } #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -14991,7 +14991,7 @@ pub fn vmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64 } #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -15002,7 +15002,7 @@ pub fn vmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int6 } #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -15013,7 +15013,7 @@ pub fn vmull_high_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uin } #[doc = "Multiply long"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -15024,7 +15024,7 @@ pub fn vmull_high_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> ui } #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -15035,7 +15035,7 @@ pub fn vmull_high_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uin } #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -15046,7 +15046,7 @@ pub fn vmull_high_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> ui } #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -15055,7 +15055,7 @@ pub fn vmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { } #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(smull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -15064,7 +15064,7 @@ pub fn vmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { } #[doc = "Multiply long"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -15073,7 +15073,7 @@ pub fn vmull_high_n_u16(a: uint16x8_t, b: u16) -> uint32x4_t { } #[doc = "Multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(umull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -15082,7 +15082,7 @@ pub fn vmull_high_n_u32(a: uint32x4_t, b: u32) -> uint64x2_t { } #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(pmull2))] @@ -15091,7 +15091,7 @@ pub fn vmull_high_p64(a: poly64x2_t, b: poly64x2_t) -> p128 { } #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(pmull2))] @@ -15104,7 +15104,7 @@ pub fn vmull_high_p8(a: poly8x16_t, b: poly8x16_t) -> poly16x8_t { } #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smull2))] @@ -15117,7 +15117,7 @@ pub fn vmull_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { } #[doc = "Signed multiply long"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smull2))] @@ -15130,7 +15130,7 @@ pub fn vmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { } #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smull2))] @@ -15143,7 +15143,7 @@ pub fn vmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umull2))] @@ -15156,7 +15156,7 @@ pub fn vmull_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umull2))] @@ -15169,7 +15169,7 @@ pub fn vmull_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umull2))] @@ -15182,7 +15182,7 @@ pub fn vmull_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { } #[doc = 
"Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(pmull))] @@ -15198,7 +15198,7 @@ pub fn vmull_p64(a: p64, b: p64) -> p128 { } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15209,7 +15209,7 @@ pub fn vmulq_lane_f64(a: float64x2_t, b: float64x1_t) -> float6 } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15220,7 +15220,7 @@ pub fn vmulq_laneq_f64(a: float64x2_t, b: float64x2_t) -> float } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15234,7 +15234,7 @@ pub fn vmuls_lane_f32(a: f32, b: float32x2_t) -> f32 { } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuls_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15248,7 +15248,7 @@ pub fn vmuls_laneq_f32(a: f32, b: float32x4_t) -> f32 { } #[doc = "Floating-point multiply"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmuld_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmul, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15262,7 +15262,7 @@ pub fn vmuld_laneq_f64(a: f64, b: float64x2_t) -> f64 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -15279,7 +15279,7 @@ pub fn vmulx_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -15296,7 +15296,7 @@ pub fn vmulxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] @@ -15312,7 +15312,7 @@ pub fn vmulx_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] @@ -15328,7 +15328,7 @@ pub fn vmulxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = 
"Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] @@ -15344,7 +15344,7 @@ pub fn vmulx_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] @@ -15360,7 +15360,7 @@ pub fn vmulxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -15372,7 +15372,7 @@ pub fn vmulx_lane_f16(a: float16x4_t, b: float16x4_t) -> float1 } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -15384,7 +15384,7 @@ pub fn vmulx_laneq_f16(a: float16x4_t, b: float16x8_t) -> float } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -15396,7 +15396,7 @@ pub fn vmulxq_lane_f16(a: float16x8_t, 
b: float16x4_t) -> float } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -15408,7 +15408,7 @@ pub fn vmulxq_laneq_f16(a: float16x8_t, b: float16x8_t) -> floa } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15419,7 +15419,7 @@ pub fn vmulx_lane_f32(a: float32x2_t, b: float32x2_t) -> float3 } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15430,7 +15430,7 @@ pub fn vmulx_laneq_f32(a: float32x2_t, b: float32x4_t) -> float } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15441,7 +15441,7 @@ pub fn vmulxq_lane_f32(a: float32x4_t, b: float32x2_t) -> float } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15452,7 +15452,7 @@ pub fn vmulxq_laneq_f32(a: 
float32x4_t, b: float32x4_t) -> floa } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15463,7 +15463,7 @@ pub fn vmulxq_laneq_f64(a: float64x2_t, b: float64x2_t) -> floa } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15474,7 +15474,7 @@ pub fn vmulx_lane_f64(a: float64x1_t, b: float64x1_t) -> float6 } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15485,7 +15485,7 @@ pub fn vmulx_laneq_f64(a: float64x1_t, b: float64x2_t) -> float } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulx_n_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmulx))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -15495,7 +15495,7 @@ pub fn vmulx_n_f16(a: float16x4_t, b: f16) -> float16x4_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_n_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmulx))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -15505,7 +15505,7 @@ pub fn 
vmulxq_n_f16(a: float16x8_t, b: f16) -> float16x8_t { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] @@ -15521,7 +15521,7 @@ pub fn vmulxd_f64(a: f64, b: f64) -> f64 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmulx))] @@ -15537,7 +15537,7 @@ pub fn vmulxs_f32(a: f32, b: f32) -> f32 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15548,7 +15548,7 @@ pub fn vmulxd_lane_f64(a: f64, b: float64x1_t) -> f64 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxd_laneq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15559,7 +15559,7 @@ pub fn vmulxd_laneq_f64(a: f64, b: float64x2_t) -> f64 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15570,7 +15570,7 @@ pub fn vmulxs_lane_f32(a: f32, b: float32x2_t) -> 
f32 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxs_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15581,7 +15581,7 @@ pub fn vmulxs_laneq_f32(a: f32, b: float32x4_t) -> f32 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -15598,7 +15598,7 @@ pub fn vmulxh_f16(a: f16, b: f16) -> f16 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -15610,7 +15610,7 @@ pub fn vmulxh_lane_f16(a: f16, b: float16x4_t) -> f16 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxh_laneq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] #[target_feature(enable = "neon,fp16")] @@ -15622,7 +15622,7 @@ pub fn vmulxh_laneq_f16(a: f16, b: float16x8_t) -> f16 { } #[doc = "Floating-point multiply extended"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulxq_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmulx, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -15633,7 +15633,7 @@ pub fn vmulxq_lane_f64(a: float64x2_t, b: float64x1_t) -> float } #[doc = 
"Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fneg))] @@ -15642,7 +15642,7 @@ pub fn vneg_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fneg))] @@ -15651,7 +15651,7 @@ pub fn vnegq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(neg))] @@ -15660,7 +15660,7 @@ pub fn vneg_s64(a: int64x1_t) -> int64x1_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(neg))] @@ -15669,7 +15669,7 @@ pub fn vnegq_s64(a: int64x2_t) -> int64x2_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(neg))] @@ -15678,7 +15678,7 @@ pub fn vnegd_s64(a: i64) -> i64 { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] 
#[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -15688,7 +15688,7 @@ pub fn vnegh_f16(a: f16) -> f16 { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -15701,7 +15701,7 @@ pub fn vpaddd_f64(a: float64x2_t) -> f64 { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadds_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -15714,7 +15714,7 @@ pub fn vpadds_f32(a: float32x2_t) -> f32 { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15723,7 +15723,7 @@ pub fn vpaddd_s64(a: int64x2_t) -> i64 { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15732,7 +15732,7 @@ pub fn vpaddd_u64(a: uint64x2_t) -> u64 { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -15746,7 +15746,7 @@ pub fn 
vpaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] @@ -15759,7 +15759,7 @@ pub fn vpaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(faddp))] @@ -15772,7 +15772,7 @@ pub fn vpaddq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15785,7 +15785,7 @@ pub fn vpaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15798,7 +15798,7 @@ pub fn vpaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15811,7 +15811,7 @@ pub fn vpaddq_s32(a: int32x4_t, b: 
int32x4_t) -> int32x4_t { } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15824,7 +15824,7 @@ pub fn vpaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15837,7 +15837,7 @@ pub fn vpaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15850,7 +15850,7 @@ pub fn vpaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15863,7 +15863,7 @@ pub fn vpaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Add Pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(addp))] @@ -15876,7 +15876,7 @@ pub fn vpaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Floating-point add pairwise"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -15893,7 +15893,7 @@ pub fn vpmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -15910,7 +15910,7 @@ pub fn vpmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -15927,7 +15927,7 @@ pub fn vpmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -15944,7 +15944,7 @@ pub fn vpmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point Maximum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnm_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -15960,7 +15960,7 @@ pub fn vpmaxnm_f32(a: 
float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Floating-point Maximum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -15976,7 +15976,7 @@ pub fn vpmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Floating-point Maximum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -15992,7 +15992,7 @@ pub fn vpmaxnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point maximum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnmqd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16008,7 +16008,7 @@ pub fn vpmaxnmqd_f64(a: float64x2_t) -> f64 { } #[doc = "Floating-point maximum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxnms_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fmaxnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16024,7 +16024,7 @@ pub fn vpmaxnms_f32(a: float32x2_t) -> f32 { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] 
#[cfg_attr(test, assert_instr(fmaxp))] @@ -16040,7 +16040,7 @@ pub fn vpmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] @@ -16056,7 +16056,7 @@ pub fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] @@ -16072,7 +16072,7 @@ pub fn vpmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] @@ -16088,7 +16088,7 @@ pub fn vpmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(smaxp))] @@ -16104,7 +16104,7 @@ pub fn vpmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature 
= "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] @@ -16120,7 +16120,7 @@ pub fn vpmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] @@ -16136,7 +16136,7 @@ pub fn vpmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(umaxp))] @@ -16152,7 +16152,7 @@ pub fn vpmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxqd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] @@ -16168,7 +16168,7 @@ pub fn vpmaxqd_f64(a: float64x2_t) -> f64 { } #[doc = "Floating-point maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmaxs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fmaxp))] @@ -16184,7 +16184,7 @@ pub fn vpmaxs_f32(a: float32x2_t) -> f32 { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = 
"stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -16201,7 +16201,7 @@ pub fn vpmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -16218,7 +16218,7 @@ pub fn vpminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -16235,7 +16235,7 @@ pub fn vpminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -16252,7 +16252,7 @@ pub fn vpminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnm_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16268,7 +16268,7 @@ pub fn vpminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16284,7 +16284,7 @@ pub fn vpminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Floating-point Minimum Number Pairwise (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16300,7 +16300,7 @@ pub fn vpminnmq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnmqd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16316,7 +16316,7 @@ pub fn vpminnmqd_f64(a: float64x2_t) -> f64 { } #[doc = "Floating-point minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminnms_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fminnmp))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16332,7 +16332,7 @@ pub fn vpminnms_f32(a: float32x2_t) -> f32 { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] @@ -16348,7 +16348,7 @@ pub fn vpminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { 
} #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] @@ -16364,7 +16364,7 @@ pub fn vpminq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] @@ -16380,7 +16380,7 @@ pub fn vpminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] @@ -16396,7 +16396,7 @@ pub fn vpminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sminp))] @@ -16412,7 +16412,7 @@ pub fn vpminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] @@ -16428,7 +16428,7 @@ pub fn vpminq_u8(a: 
uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] @@ -16444,7 +16444,7 @@ pub fn vpminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uminp))] @@ -16460,7 +16460,7 @@ pub fn vpminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpminqd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] @@ -16476,7 +16476,7 @@ pub fn vpminqd_f64(a: float64x2_t) -> f64 { } #[doc = "Floating-point minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmins_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fminp))] @@ -16492,7 +16492,7 @@ pub fn vpmins_f32(a: float32x2_t) -> f32 { } #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] @@ -16508,7 
+16508,7 @@ pub fn vqabs_s64(a: int64x1_t) -> int64x1_t { } #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] @@ -16524,7 +16524,7 @@ pub fn vqabsq_s64(a: int64x2_t) -> int64x2_t { } #[doc = "Signed saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsb_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] @@ -16533,7 +16533,7 @@ pub fn vqabsb_s8(a: i8) -> i8 { } #[doc = "Signed saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] @@ -16542,7 +16542,7 @@ pub fn vqabsh_s16(a: i16) -> i16 { } #[doc = "Signed saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabss_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] @@ -16558,7 +16558,7 @@ pub fn vqabss_s32(a: i32) -> i32 { } #[doc = "Signed saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] 
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(sqabs))] @@ -16574,7 +16574,7 @@ pub fn vqabsd_s64(a: i64) -> i64 { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqadd))] @@ -16585,7 +16585,7 @@ pub fn vqaddb_s8(a: i8, b: i8) -> i8 { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqadd))] @@ -16596,7 +16596,7 @@ pub fn vqaddh_s16(a: i16, b: i16) -> i16 { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddb_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqadd))] @@ -16607,7 +16607,7 @@ pub fn vqaddb_u8(a: u8, b: u8) -> u8 { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddh_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqadd))] @@ -16618,7 +16618,7 @@ pub fn vqaddh_u16(a: u16, b: u16) -> u16 { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqadd))] @@ -16634,7 +16634,7 @@ pub fn vqadds_s32(a: i32, b: i32) -> i32 { } #[doc = "Saturating add"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqadd))] @@ -16650,7 +16650,7 @@ pub fn vqaddd_s64(a: i64, b: i64) -> i64 { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadds_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqadd))] @@ -16666,7 +16666,7 @@ pub fn vqadds_u32(a: u32, b: u32) -> u32 { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqadd))] @@ -16682,7 +16682,7 @@ pub fn vqaddd_u64(a: u64, b: u64) -> u64 { } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16693,7 +16693,7 @@ pub fn vqdmlal_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16x } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16704,7 +16704,7 @@ pub fn vqdmlal_high_laneq_s16(a: int32x4_t, b: int16x8_t, c: int16 } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16715,7 +16715,7 @@ pub fn vqdmlal_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32x } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16726,7 +16726,7 @@ pub fn vqdmlal_high_laneq_s32(a: int64x2_t, b: int32x4_t, c: int32 } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16735,7 +16735,7 @@ pub fn vqdmlal_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16744,7 +16744,7 @@ pub fn vqdmlal_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2))] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] @@ -16753,7 +16753,7 @@ pub fn vqdmlal_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16762,7 +16762,7 @@ pub fn vqdmlal_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { } #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, N = 2))] #[rustc_legacy_const_generics(3)] @@ -16773,7 +16773,7 @@ pub fn vqdmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) } #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16784,7 +16784,7 @@ pub fn vqdmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -16795,7 +16795,7 @@ pub fn vqdmlalh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -16806,7 +16806,7 @@ pub fn vqdmlalh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -16817,7 +16817,7 @@ pub fn vqdmlals_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -16828,7 +16828,7 @@ pub fn vqdmlals_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlalh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16838,7 +16838,7 @@ pub fn vqdmlalh_s16(a: i32, b: i16, c: i16) -> i32 { } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlals_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlal))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16848,7 +16848,7 @@ pub fn vqdmlals_s32(a: i64, b: i32, c: i32) -> i64 { 
} #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16859,7 +16859,7 @@ pub fn vqdmlsl_high_lane_s16(a: int32x4_t, b: int16x8_t, c: int16x } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16870,7 +16870,7 @@ pub fn vqdmlsl_high_laneq_s16(a: int32x4_t, b: int16x8_t, c: int16 } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16881,7 +16881,7 @@ pub fn vqdmlsl_high_lane_s32(a: int64x2_t, b: int32x4_t, c: int32x } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16892,7 +16892,7 @@ pub fn vqdmlsl_high_laneq_s32(a: int64x2_t, b: int32x4_t, c: int32 } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2))] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16901,7 +16901,7 @@ pub fn vqdmlsl_high_n_s16(a: int32x4_t, b: int16x8_t, c: i16) -> int32x4_t { } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16910,7 +16910,7 @@ pub fn vqdmlsl_high_s16(a: int32x4_t, b: int16x8_t, c: int16x8_t) -> int32x4_t { } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16919,7 +16919,7 @@ pub fn vqdmlsl_high_n_s32(a: int64x2_t, b: int32x4_t, c: i32) -> int64x2_t { } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -16928,7 +16928,7 @@ pub fn vqdmlsl_high_s32(a: int64x2_t, b: int32x4_t, c: int32x4_t) -> int64x2_t { } #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, N = 2))] #[rustc_legacy_const_generics(3)] @@ -16939,7 +16939,7 @@ pub fn vqdmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t) } #[doc = "Vector widening saturating doubling 
multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, N = 1))] #[rustc_legacy_const_generics(3)] @@ -16950,7 +16950,7 @@ pub fn vqdmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t) } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -16961,7 +16961,7 @@ pub fn vqdmlslh_lane_s16(a: i32, b: i16, c: int16x4_t) -> i32 { } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -16972,7 +16972,7 @@ pub fn vqdmlslh_laneq_s16(a: i32, b: i16, c: int16x8_t) -> i32 } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -16983,7 +16983,7 @@ pub fn vqdmlsls_lane_s32(a: i64, b: i32, c: int32x2_t) -> i64 { } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl, LANE = 0))] #[rustc_legacy_const_generics(3)] @@ -16994,7 
+16994,7 @@ pub fn vqdmlsls_laneq_s32(a: i64, b: i32, c: int32x4_t) -> i64 } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlslh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17004,7 +17004,7 @@ pub fn vqdmlslh_s16(a: i32, b: i16, c: i16) -> i32 { } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsls_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmlsl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17014,7 +17014,7 @@ pub fn vqdmlsls_s32(a: i64, b: i32, c: i32) -> i64 { } #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -17025,7 +17025,7 @@ pub fn vqdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_ } #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -17036,7 +17036,7 @@ pub fn vqdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8 } #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -17047,7 +17047,7 @@ pub fn vqdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_ } #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -17058,7 +17058,7 @@ pub fn vqdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4 } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, N = 2))] #[rustc_legacy_const_generics(2)] @@ -17072,7 +17072,7 @@ pub fn vqdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, N = 2))] #[rustc_legacy_const_generics(2)] @@ -17086,7 +17086,7 @@ pub fn vqdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17097,7 +17097,7 @@ pub fn vqdmulhh_s16(a: i16, b: i16) -> i16 { } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17108,7 +17108,7 @@ pub fn vqdmulhs_s32(a: i32, b: i32) -> i32 { } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, N = 1))] #[rustc_legacy_const_generics(2)] @@ -17122,7 +17122,7 @@ pub fn vqdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhs_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmulh, N = 1))] #[rustc_legacy_const_generics(2)] @@ -17136,7 +17136,7 @@ pub fn vqdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -17151,7 +17151,7 @@ pub fn vqdmull_high_lane_s16(a: int16x8_t, b: int16x4_t) -> int32x } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -17166,7 +17166,7 @@ pub fn vqdmull_high_laneq_s32(a: int32x4_t, b: int32x4_t) -> int64 
} #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2, N = 1))] #[rustc_legacy_const_generics(2)] @@ -17181,7 +17181,7 @@ pub fn vqdmull_high_lane_s32(a: int32x4_t, b: int32x2_t) -> int64x } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2, N = 4))] #[rustc_legacy_const_generics(2)] @@ -17196,7 +17196,7 @@ pub fn vqdmull_high_laneq_s16(a: int16x8_t, b: int16x8_t) -> int32 } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17209,7 +17209,7 @@ pub fn vqdmull_high_n_s16(a: int16x8_t, b: i16) -> int32x4_t { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17222,7 +17222,7 @@ pub fn vqdmull_high_n_s32(a: int32x4_t, b: i32) -> int64x2_t { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] @@ -17235,7 +17235,7 @@ pub fn vqdmull_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17248,7 +17248,7 @@ pub fn vqdmull_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { } #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 4))] #[rustc_legacy_const_generics(2)] @@ -17262,7 +17262,7 @@ pub fn vqdmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t } #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 2))] #[rustc_legacy_const_generics(2)] @@ -17276,7 +17276,7 @@ pub fn vqdmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 2))] #[rustc_legacy_const_generics(2)] @@ -17290,7 +17290,7 @@ pub fn vqdmullh_lane_s16(a: i16, b: int16x4_t) -> i32 { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_laneq_s32)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 2))] #[rustc_legacy_const_generics(2)] @@ -17304,7 +17304,7 @@ pub fn vqdmulls_laneq_s32(a: i32, b: int32x4_t) -> i64 { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 4))] #[rustc_legacy_const_generics(2)] @@ -17318,7 +17318,7 @@ pub fn vqdmullh_laneq_s16(a: i16, b: int16x8_t) -> i32 { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmullh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17329,7 +17329,7 @@ pub fn vqdmullh_s16(a: i16, b: i16) -> i32 { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull, N = 1))] #[rustc_legacy_const_generics(2)] @@ -17343,7 +17343,7 @@ pub fn vqdmulls_lane_s32(a: i32, b: int32x2_t) -> i64 { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulls_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqdmull))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17359,7 +17359,7 @@ pub fn vqdmulls_s32(a: i32, b: i32) -> i64 { } #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s16)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17374,7 +17374,7 @@ pub fn vqmovn_high_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { } #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17383,7 +17383,7 @@ pub fn vqmovn_high_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { } #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17392,7 +17392,7 @@ pub fn vqmovn_high_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { } #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17407,7 +17407,7 @@ pub fn vqmovn_high_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t { } #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17416,7 +17416,7 @@ pub fn vqmovn_high_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_t { } #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_high_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17425,7 +17425,7 @@ pub fn vqmovn_high_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_t { } #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17441,7 +17441,7 @@ pub fn vqmovnd_s64(a: i64) -> i32 { } #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17457,7 +17457,7 @@ pub fn vqmovnd_u64(a: u64) -> u32 { } #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17466,7 +17466,7 @@ pub fn vqmovnh_s16(a: i16) -> i8 { } #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17475,7 +17475,7 @@ pub fn vqmovns_s32(a: i32) -> i16 { } #[doc = "Saturating extract narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovnh_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17484,7 +17484,7 @@ pub fn vqmovnh_u16(a: u16) -> u8 { } #[doc = "Saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovns_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqxtn))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17493,7 +17493,7 @@ pub fn vqmovns_u32(a: u32) -> u16 { } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17508,7 +17508,7 @@ pub fn vqmovun_high_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_t { } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17517,7 +17517,7 @@ pub fn vqmovun_high_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8_t { } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_high_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17526,7 +17526,7 @@ pub fn vqmovun_high_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4_t { } #[doc = "Signed 
saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovunh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17535,7 +17535,7 @@ pub fn vqmovunh_s16(a: i16) -> u8 { } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovuns_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17544,7 +17544,7 @@ pub fn vqmovuns_s32(a: i32) -> u16 { } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovund_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqxtun))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -17553,7 +17553,7 @@ pub fn vqmovund_s64(a: i64) -> u32 { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] @@ -17569,7 +17569,7 @@ pub fn vqneg_s64(a: int64x1_t) -> int64x1_t { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] @@ -17585,7 +17585,7 @@ pub fn vqnegq_s64(a: int64x2_t) -> int64x2_t { } #[doc = "Signed saturating negate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegb_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] @@ -17594,7 +17594,7 @@ pub fn vqnegb_s8(a: i8) -> i8 { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] @@ -17603,7 +17603,7 @@ pub fn vqnegh_s16(a: i16) -> i16 { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegs_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] @@ -17612,7 +17612,7 @@ pub fn vqnegs_s32(a: i32) -> i32 { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqneg))] @@ -17621,7 +17621,7 @@ pub fn vqnegd_s64(a: i64) -> i64 { } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17635,7 +17635,7 @@ pub fn vqrdmlah_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4 } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17649,7 +17649,7 @@ pub fn vqrdmlah_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2 } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17663,7 +17663,7 @@ pub fn vqrdmlah_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17677,7 +17677,7 @@ pub fn vqrdmlah_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17691,7 +17691,7 @@ pub fn vqrdmlahq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, 
LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17705,7 +17705,7 @@ pub fn vqrdmlahq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17719,7 +17719,7 @@ pub fn vqrdmlahq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16 } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17733,7 +17733,7 @@ pub fn vqrdmlahq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32 } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -17749,7 +17749,7 @@ pub fn vqrdmlah_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -17765,7 +17765,7 @@ pub fn vqrdmlahq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { } #[doc = "Signed saturating 
rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlah_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -17781,7 +17781,7 @@ pub fn vqrdmlah_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -17797,7 +17797,7 @@ pub fn vqrdmlahq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17808,7 +17808,7 @@ pub fn vqrdmlahh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17819,7 +17819,7 @@ pub fn vqrdmlahh_laneq_s16(a: i16, b: i16, c: int16x8_t) -> i16 } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17830,7 +17830,7 @@ pub fn vqrdmlahs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17841,7 +17841,7 @@ pub fn vqrdmlahs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -17853,7 +17853,7 @@ pub fn vqrdmlahh_s16(a: i16, b: i16, c: i16) -> i16 { } #[doc = "Signed saturating rounding doubling multiply accumulate returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlahs_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlah))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -17865,7 +17865,7 @@ pub fn vqrdmlahs_s32(a: i32, b: i32, c: i32) -> i32 { } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 
1))] #[rustc_legacy_const_generics(3)] @@ -17879,7 +17879,7 @@ pub fn vqrdmlsh_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4 } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17893,7 +17893,7 @@ pub fn vqrdmlsh_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2 } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17907,7 +17907,7 @@ pub fn vqrdmlsh_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17921,7 +17921,7 @@ pub fn vqrdmlsh_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17935,7 +17935,7 @@ pub fn vqrdmlshq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17949,7 +17949,7 @@ pub fn vqrdmlshq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17963,7 +17963,7 @@ pub fn vqrdmlshq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16 } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -17977,7 +17977,7 @@ pub fn vqrdmlshq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32 } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -17993,7 +17993,7 @@ pub fn vqrdmlsh_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] 
#[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -18009,7 +18009,7 @@ pub fn vqrdmlshq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlsh_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -18025,7 +18025,7 @@ pub fn vqrdmlsh_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -18041,7 +18041,7 @@ pub fn vqrdmlshq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -18052,7 +18052,7 @@ pub fn vqrdmlshh_lane_s16(a: i16, b: i16, c: int16x4_t) -> i16 } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -18063,7 +18063,7 @@ pub fn vqrdmlshh_laneq_s16(a: i16, b: 
i16, c: int16x8_t) -> i16 } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -18074,7 +18074,7 @@ pub fn vqrdmlshs_lane_s32(a: i32, b: i32, c: int32x2_t) -> i32 } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh, LANE = 1))] #[rustc_legacy_const_generics(3)] @@ -18085,7 +18085,7 @@ pub fn vqrdmlshs_laneq_s32(a: i32, b: i32, c: int32x4_t) -> i32 } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -18097,7 +18097,7 @@ pub fn vqrdmlshh_s16(a: i16, b: i16, c: i16) -> i16 { } #[doc = "Signed saturating rounding doubling multiply subtract returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmlshs_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "rdm")] #[cfg_attr(test, assert_instr(sqrdmlsh))] #[stable(feature = "rdm_intrinsics", since = "1.62.0")] @@ -18109,7 +18109,7 @@ pub fn vqrdmlshs_s32(a: i32, b: i32, c: i32) -> i32 { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -18120,7 +18120,7 @@ pub fn vqrdmulhh_lane_s16(a: i16, b: int16x4_t) -> i16 { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -18131,7 +18131,7 @@ pub fn vqrdmulhh_laneq_s16(a: i16, b: int16x8_t) -> i16 { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -18142,7 +18142,7 @@ pub fn vqrdmulhs_lane_s32(a: i32, b: int32x2_t) -> i32 { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh, LANE = 1))] #[rustc_legacy_const_generics(2)] @@ -18153,7 +18153,7 @@ pub fn vqrdmulhs_laneq_s32(a: i32, b: int32x4_t) -> i32 { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ 
-18162,7 +18162,7 @@ pub fn vqrdmulhh_s16(a: i16, b: i16) -> i16 { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhs_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrdmulh))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18171,7 +18171,7 @@ pub fn vqrdmulhs_s32(a: i32, b: i32) -> i32 { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18182,7 +18182,7 @@ pub fn vqrshlb_s8(a: i8, b: i8) -> i8 { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18193,7 +18193,7 @@ pub fn vqrshlh_s16(a: i16, b: i16) -> i16 { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlb_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18204,7 +18204,7 @@ pub fn vqrshlb_u8(a: u8, b: i8) -> u8 { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlh_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", 
since = "1.59.0")] @@ -18215,7 +18215,7 @@ pub fn vqrshlh_u16(a: u16, b: i16) -> u16 { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18231,7 +18231,7 @@ pub fn vqrshld_s64(a: i64, b: i64) -> i64 { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18247,7 +18247,7 @@ pub fn vqrshls_s32(a: i32, b: i32) -> i32 { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshls_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18263,7 +18263,7 @@ pub fn vqrshls_u32(a: u32, b: i32) -> u32 { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshld_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18279,7 +18279,7 @@ pub fn vqrshld_u64(a: u64, b: i64) -> u64 { } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] 
#[rustc_legacy_const_generics(2)] @@ -18296,7 +18296,7 @@ pub fn vqrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18307,7 +18307,7 @@ pub fn vqrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18318,7 +18318,7 @@ pub fn vqrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t } #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18335,7 +18335,7 @@ pub fn vqrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16 } #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18346,7 +18346,7 @@ pub fn vqrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x } #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_high_n_u64)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18357,7 +18357,7 @@ pub fn vqrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x } #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18369,7 +18369,7 @@ pub fn vqrshrnd_n_u64(a: u64) -> u32 { } #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18381,7 +18381,7 @@ pub fn vqrshrnh_n_u16(a: u16) -> u8 { } #[doc = "Unsigned saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18393,7 +18393,7 @@ pub fn vqrshrns_n_u32(a: u32) -> u16 { } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnh_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18405,7 +18405,7 @@ pub fn vqrshrnh_n_s16(a: i16) -> i8 { } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrns_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(test, assert_instr(sqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18417,7 +18417,7 @@ pub fn vqrshrns_n_s32(a: i32) -> i16 { } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrnd_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18429,7 +18429,7 @@ pub fn vqrshrnd_n_s64(a: i64) -> i32 { } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18446,7 +18446,7 @@ pub fn vqrshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16 } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18457,7 +18457,7 @@ pub fn vqrshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_high_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18468,7 +18468,7 @@ pub fn vqrshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrund_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18480,7 +18480,7 @@ pub fn vqrshrund_n_s64(a: i64) -> u32 { } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrunh_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18492,7 +18492,7 @@ pub fn vqrshrunh_n_s16(a: i16) -> u8 { } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshruns_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqrshrun, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18504,7 +18504,7 @@ pub fn vqrshruns_n_s32(a: i32) -> u16 { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18515,7 +18515,7 @@ pub fn vqshlb_n_s8(a: i8) -> i8 { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18526,7 +18526,7 @@ pub fn vqshld_n_s64(a: i64) -> i64 { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_s16)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18537,7 +18537,7 @@ pub fn vqshlh_n_s16(a: i16) -> i16 { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18548,7 +18548,7 @@ pub fn vqshls_n_s32(a: i32) -> i32 { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18559,7 +18559,7 @@ pub fn vqshlb_n_u8(a: u8) -> u8 { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18570,7 +18570,7 @@ pub fn vqshld_n_u64(a: u64) -> u64 { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18581,7 +18581,7 @@ pub fn vqshlh_n_u16(a: u16) -> u16 { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18592,7 +18592,7 @@ pub fn 
vqshls_n_u32(a: u32) -> u32 { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18602,7 +18602,7 @@ pub fn vqshlb_s8(a: i8, b: i8) -> i8 { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18612,7 +18612,7 @@ pub fn vqshlh_s16(a: i16, b: i16) -> i16 { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18622,7 +18622,7 @@ pub fn vqshls_s32(a: i32, b: i32) -> i32 { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlb_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18632,7 +18632,7 @@ pub fn vqshlb_u8(a: u8, b: i8) -> u8 { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlh_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18642,7 +18642,7 @@ pub fn vqshlh_u16(a: u16, b: i16) -> u16 { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshls_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18652,7 +18652,7 @@ pub fn vqshls_u32(a: u32, b: i32) -> u32 { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18668,7 +18668,7 @@ pub fn vqshld_s64(a: i64, b: i64) -> i64 { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshld_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18684,7 +18684,7 @@ pub fn vqshld_u64(a: u64, b: i64) -> u64 { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlub_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18695,7 +18695,7 @@ pub fn vqshlub_n_s8(a: i8) -> u8 { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlud_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18706,7 +18706,7 @@ pub fn vqshlud_n_s64(a: i64) -> u64 { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluh_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18717,7 +18717,7 @@ pub fn vqshluh_n_s16(a: i16) -> u16 { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlus_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18728,7 +18728,7 @@ pub fn vqshlus_n_s32(a: i32) -> u32 { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18745,7 +18745,7 @@ pub fn vqshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18756,7 +18756,7 @@ pub fn vqshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18767,7 +18767,7 @@ pub fn vqshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18784,7 +18784,7 @@ pub fn vqshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_ } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18795,7 +18795,7 @@ pub fn vqshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8 } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_high_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18806,7 +18806,7 @@ pub fn vqshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4 } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18824,7 +18824,7 @@ pub fn vqshrnd_n_s64(a: i64) -> i32 { } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnd_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18842,7 +18842,7 @@ pub fn vqshrnd_n_u64(a: u64) -> u32 { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18853,7 +18853,7 @@ pub fn vqshrnh_n_s16(a: i16) -> i8 { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18864,7 +18864,7 @@ pub fn vqshrns_n_s32(a: i32) -> i16 { } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrnh_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18875,7 +18875,7 @@ pub fn vqshrnh_n_u16(a: u16) -> u8 { } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrns_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(uqshrn, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18886,7 +18886,7 @@ pub fn vqshrns_n_u32(a: u32) -> u16 { } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18903,7 +18903,7 @@ pub fn vqshrun_high_n_s16(a: uint8x8_t, b: int16x8_t) -> uint8x16_ } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18914,7 +18914,7 @@ pub fn vqshrun_high_n_s32(a: uint16x4_t, b: int32x4_t) -> uint16x8 } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_high_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -18925,7 +18925,7 @@ pub fn vqshrun_high_n_s64(a: uint32x2_t, b: int64x2_t) -> uint32x4 } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrund_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18936,7 +18936,7 @@ pub fn vqshrund_n_s64(a: i64) -> u32 { } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrunh_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18947,7 +18947,7 @@ pub fn vqshrunh_n_s16(a: i16) -> u8 { } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshruns_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sqshrun, N = 2))] #[rustc_legacy_const_generics(1)] @@ -18958,7 +18958,7 @@ pub fn vqshruns_n_s32(a: i32) -> u16 { } #[doc = "Saturating subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqsub))] @@ -18969,7 +18969,7 @@ pub fn vqsubb_s8(a: i8, b: i8) -> i8 { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqsub))] @@ -18980,7 +18980,7 @@ pub fn vqsubh_s16(a: i16, b: i16) -> i16 { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubb_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqsub))] @@ -18991,7 +18991,7 @@ pub fn vqsubb_u8(a: u8, b: u8) -> u8 { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubh_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqsub))] @@ -19002,7 +19002,7 @@ pub fn vqsubh_u16(a: u16, b: u16) -> u16 { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqsub))] @@ -19018,7 +19018,7 @@ pub fn vqsubs_s32(a: i32, b: i32) -> i32 { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_s64)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sqsub))] @@ -19034,7 +19034,7 @@ pub fn vqsubd_s64(a: i64, b: i64) -> i64 { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubs_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqsub))] @@ -19050,7 +19050,7 @@ pub fn vqsubs_u32(a: u32, b: u32) -> u32 { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(uqsub))] @@ -19066,7 +19066,7 @@ pub fn vqsubd_u64(a: u64, b: u64) -> u64 { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19082,7 +19082,7 @@ fn vqtbl1(a: int8x16_t, b: uint8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19098,7 +19098,7 @@ fn vqtbl1q(a: int8x16_t, b: uint8x16_t) -> int8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ 
-19107,7 +19107,7 @@ pub fn vqtbl1_s8(a: int8x16_t, b: uint8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19116,7 +19116,7 @@ pub fn vqtbl1q_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19125,7 +19125,7 @@ pub fn vqtbl1_u8(a: uint8x16_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19134,7 +19134,7 @@ pub fn vqtbl1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19143,7 +19143,7 @@ pub fn vqtbl1_p8(a: poly8x16_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl1q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19152,7 +19152,7 @@ pub fn vqtbl1q_p8(a: poly8x16_t, b: uint8x16_t) -> 
poly8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19168,7 +19168,7 @@ fn vqtbl2(a: int8x16_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19184,7 +19184,7 @@ fn vqtbl2q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19193,7 +19193,7 @@ pub fn vqtbl2_s8(a: int8x16x2_t, b: uint8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19202,7 +19202,7 @@ pub fn vqtbl2q_s8(a: int8x16x2_t, b: uint8x16_t) -> int8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19211,7 +19211,7 @@ pub fn vqtbl2_u8(a: uint8x16x2_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19220,7 +19220,7 @@ pub fn vqtbl2q_u8(a: uint8x16x2_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19229,7 +19229,7 @@ pub fn vqtbl2_p8(a: poly8x16x2_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl2q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19238,7 +19238,7 @@ pub fn vqtbl2q_p8(a: poly8x16x2_t, b: uint8x16_t) -> poly8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19254,7 +19254,7 @@ fn vqtbl3(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19270,7 +19270,7 @@ fn vqtbl3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t } #[doc = "Table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19279,7 +19279,7 @@ pub fn vqtbl3_s8(a: int8x16x3_t, b: uint8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19288,7 +19288,7 @@ pub fn vqtbl3q_s8(a: int8x16x3_t, b: uint8x16_t) -> int8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19297,7 +19297,7 @@ pub fn vqtbl3_u8(a: uint8x16x3_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19306,7 +19306,7 @@ pub fn vqtbl3q_u8(a: uint8x16x3_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19315,7 +19315,7 @@ pub fn vqtbl3_p8(a: poly8x16x3_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl3q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19324,7 +19324,7 @@ pub fn vqtbl3q_p8(a: poly8x16x3_t, b: uint8x16_t) -> poly8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19346,7 +19346,7 @@ fn vqtbl4(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19368,7 +19368,7 @@ fn vqtbl4q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19377,7 +19377,7 @@ pub fn vqtbl4_s8(a: int8x16x4_t, b: uint8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19386,7 +19386,7 @@ pub fn vqtbl4q_s8(a: int8x16x4_t, b: uint8x16_t) -> int8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19403,7 +19403,7 @@ pub fn vqtbl4_u8(a: uint8x16x4_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19420,7 +19420,7 @@ pub fn vqtbl4q_u8(a: uint8x16x4_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19437,7 +19437,7 @@ pub fn vqtbl4_p8(a: poly8x16x4_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbl4q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19454,7 +19454,7 @@ pub fn vqtbl4q_p8(a: poly8x16x4_t, b: uint8x16_t) -> poly8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19470,7 +19470,7 @@ fn vqtbx1(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19486,7 +19486,7 @@ fn vqtbx1q(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19495,7 +19495,7 @@ pub fn vqtbx1_s8(a: int8x8_t, b: int8x16_t, c: uint8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19504,7 +19504,7 @@ pub fn vqtbx1q_s8(a: int8x16_t, b: int8x16_t, c: uint8x16_t) -> int8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19513,7 +19513,7 @@ pub fn vqtbx1_u8(a: uint8x8_t, b: uint8x16_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19522,7 +19522,7 @@ pub fn vqtbx1q_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Extended table 
look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19531,7 +19531,7 @@ pub fn vqtbx1_p8(a: poly8x8_t, b: poly8x16_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx1q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19540,7 +19540,7 @@ pub fn vqtbx1q_p8(a: poly8x16_t, b: poly8x16_t, c: uint8x16_t) -> poly8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19556,7 +19556,7 @@ fn vqtbx2(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: uint8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19572,7 +19572,7 @@ fn vqtbx2q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: uint8x16_t) -> int8x16_t } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19581,7 +19581,7 @@ pub fn vqtbx2_s8(a: int8x8_t, b: int8x16x2_t, c: uint8x8_t) 
-> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19590,7 +19590,7 @@ pub fn vqtbx2q_s8(a: int8x16_t, b: int8x16x2_t, c: uint8x16_t) -> int8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19599,7 +19599,7 @@ pub fn vqtbx2_u8(a: uint8x8_t, b: uint8x16x2_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19608,7 +19608,7 @@ pub fn vqtbx2q_u8(a: uint8x16_t, b: uint8x16x2_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19617,7 +19617,7 @@ pub fn vqtbx2_p8(a: poly8x8_t, b: poly8x16x2_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx2q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19626,7 +19626,7 @@ pub fn 
vqtbx2q_p8(a: poly8x16_t, b: poly8x16x2_t, c: uint8x16_t) -> poly8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19643,7 +19643,7 @@ fn vqtbx3(a: int8x8_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x8_t) - } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19665,7 +19665,7 @@ fn vqtbx3q(a: int8x16_t, b: int8x16_t, c: int8x16_t, d: int8x16_t, e: uint8x16_t } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19674,7 +19674,7 @@ pub fn vqtbx3_s8(a: int8x8_t, b: int8x16x3_t, c: uint8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19683,7 +19683,7 @@ pub fn vqtbx3q_s8(a: int8x16_t, b: int8x16x3_t, c: uint8x16_t) -> int8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", 
since = "1.59.0")] @@ -19700,7 +19700,7 @@ pub fn vqtbx3_u8(a: uint8x8_t, b: uint8x16x3_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19717,7 +19717,7 @@ pub fn vqtbx3q_u8(a: uint8x16_t, b: uint8x16x3_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19734,7 +19734,7 @@ pub fn vqtbx3_p8(a: poly8x8_t, b: poly8x16x3_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx3q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19751,7 +19751,7 @@ pub fn vqtbx3q_p8(a: poly8x16_t, b: poly8x16x3_t, c: uint8x16_t) -> poly8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19781,7 +19781,7 @@ fn vqtbx4( } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] 
@@ -19811,7 +19811,7 @@ fn vqtbx4q( } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19820,7 +19820,7 @@ pub fn vqtbx4_s8(a: int8x8_t, b: int8x16x4_t, c: uint8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19829,7 +19829,7 @@ pub fn vqtbx4q_s8(a: int8x16_t, b: int8x16x4_t, c: uint8x16_t) -> int8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19847,7 +19847,7 @@ pub fn vqtbx4_u8(a: uint8x8_t, b: uint8x16x4_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19865,7 +19865,7 @@ pub fn vqtbx4q_u8(a: uint8x16_t, b: uint8x16x4_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19883,7 +19883,7 
@@ pub fn vqtbx4_p8(a: poly8x8_t, b: poly8x16x4_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqtbx4q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19901,7 +19901,7 @@ pub fn vqtbx4q_p8(a: poly8x16_t, b: poly8x16x4_t, c: uint8x16_t) -> poly8x16_t { } #[doc = "Rotate and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrax1q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(rax1))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] @@ -19917,7 +19917,7 @@ pub fn vrax1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] @@ -19926,7 +19926,7 @@ pub fn vrbit_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(rbit))] @@ -19935,7 +19935,7 @@ pub fn vrbitq_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19945,7 +19945,7 @@ pub fn vrbit_u8(a: uint8x8_t) -> 
uint8x8_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19959,7 +19959,7 @@ pub fn vrbit_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19969,7 +19969,7 @@ pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19988,7 +19988,7 @@ pub fn vrbitq_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19998,7 +19998,7 @@ pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbit_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20012,7 +20012,7 @@ pub fn vrbit_p8(a: poly8x8_t) -> poly8x8_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20022,7 +20022,7 @@ pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { } #[doc = "Reverse bit order"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrbitq_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20041,7 +20041,7 @@ pub fn vrbitq_p8(a: poly8x16_t) -> poly8x16_t { } #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20057,7 +20057,7 @@ pub fn vrecpe_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20073,7 +20073,7 @@ pub fn vrecpeq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecped_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpe))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20089,7 +20089,7 @@ pub fn vrecped_f64(a: f64) -> f64 { } #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpes_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpe))] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20105,7 +20105,7 @@ pub fn vrecpes_f32(a: f32) -> f32 { } #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(frecpe))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -20122,7 +20122,7 @@ pub fn vrecpeh_f16(a: f16) -> f16 { } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20138,7 +20138,7 @@ pub fn vrecps_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20154,7 +20154,7 @@ pub fn vrecpsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20170,7 +20170,7 @@ pub fn vrecpsd_f64(a: f64, b: f64) -> f64 { } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpss_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecps))] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] @@ -20186,7 +20186,7 @@ pub fn vrecpss_f32(a: f32, b: f32) -> f32 { } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(frecps))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -20203,7 +20203,7 @@ pub fn vrecpsh_f16(a: f16, b: f16) -> f16 { } #[doc = "Floating-point reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20219,7 +20219,7 @@ pub fn vrecpxd_f64(a: f64) -> f64 { } #[doc = "Floating-point reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frecpx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20235,7 +20235,7 @@ pub fn vrecpxs_f32(a: f32) -> f32 { } #[doc = "Floating-point reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpxh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(frecpx))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -20252,7 +20252,7 @@ pub fn vrecpxh_f16(a: f16) -> f16 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ 
-20263,7 +20263,7 @@ pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -20277,7 +20277,7 @@ pub fn vreinterpret_f64_f16(a: float16x4_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -20288,7 +20288,7 @@ pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -20303,7 +20303,7 @@ pub fn vreinterpretq_f64_f16(a: float16x8_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -20314,7 +20314,7 @@ pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -20328,7 +20328,7 @@ pub fn vreinterpret_f16_f64(a: float64x1_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -20339,7 +20339,7 @@ pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -20354,7 +20354,7 @@ pub fn vreinterpretq_f16_f64(a: float64x2_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20364,7 +20364,7 @@ pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20377,7 +20377,7 @@ pub fn vreinterpretq_f64_p128(a: p128) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian 
= "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20387,7 +20387,7 @@ pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20400,7 +20400,7 @@ pub fn vreinterpret_f64_f32(a: float32x2_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20410,7 +20410,7 @@ pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20423,7 +20423,7 @@ pub fn vreinterpret_p64_f32(a: float32x2_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20433,7 +20433,7 @@ pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_f32)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20447,7 +20447,7 @@ pub fn vreinterpretq_f64_f32(a: float32x4_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20457,7 +20457,7 @@ pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20471,7 +20471,7 @@ pub fn vreinterpretq_p64_f32(a: float32x4_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20481,7 +20481,7 @@ pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20494,7 +20494,7 @@ pub fn vreinterpret_f32_f64(a: float64x1_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20504,7 +20504,7 @@ pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20517,7 +20517,7 @@ pub fn vreinterpret_s8_f64(a: float64x1_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20527,7 +20527,7 @@ pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20540,7 +20540,7 @@ pub fn vreinterpret_s16_f64(a: float64x1_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20550,7 +20550,7 @@ pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { } #[doc = "Vector reinterpret cast 
operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20563,7 +20563,7 @@ pub fn vreinterpret_s32_f64(a: float64x1_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -20572,7 +20572,7 @@ pub fn vreinterpret_s64_f64(a: float64x1_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20582,7 +20582,7 @@ pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20595,7 +20595,7 @@ pub fn vreinterpret_u8_f64(a: float64x1_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20605,7 +20605,7 @@ pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { 
} #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20618,7 +20618,7 @@ pub fn vreinterpret_u16_f64(a: float64x1_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20628,7 +20628,7 @@ pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20641,7 +20641,7 @@ pub fn vreinterpret_u32_f64(a: float64x1_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -20650,7 +20650,7 @@ pub fn vreinterpret_u64_f64(a: float64x1_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20660,7 +20660,7 @@ pub fn 
vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20673,7 +20673,7 @@ pub fn vreinterpret_p8_f64(a: float64x1_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20683,7 +20683,7 @@ pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20696,7 +20696,7 @@ pub fn vreinterpret_p16_f64(a: float64x1_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -20705,7 +20705,7 @@ pub fn vreinterpret_p64_f64(a: float64x1_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] @@ -20715,7 +20715,7 @@ pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20728,7 +20728,7 @@ pub fn vreinterpretq_p128_f64(a: float64x2_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20738,7 +20738,7 @@ pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20752,7 +20752,7 @@ pub fn vreinterpretq_f32_f64(a: float64x2_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20762,7 +20762,7 @@ pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20780,7 +20780,7 @@ pub fn vreinterpretq_s8_f64(a: float64x2_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20790,7 +20790,7 @@ pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20804,7 +20804,7 @@ pub fn vreinterpretq_s16_f64(a: float64x2_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20814,7 +20814,7 @@ pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20828,7 +20828,7 @@ pub fn vreinterpretq_s32_f64(a: float64x2_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = 
"little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20838,7 +20838,7 @@ pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20852,7 +20852,7 @@ pub fn vreinterpretq_s64_f64(a: float64x2_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20862,7 +20862,7 @@ pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20880,7 +20880,7 @@ pub fn vreinterpretq_u8_f64(a: float64x2_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20890,7 +20890,7 @@ pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f64)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20904,7 +20904,7 @@ pub fn vreinterpretq_u16_f64(a: float64x2_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20914,7 +20914,7 @@ pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20928,7 +20928,7 @@ pub fn vreinterpretq_u32_f64(a: float64x2_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20938,7 +20938,7 @@ pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20952,7 +20952,7 @@ pub fn vreinterpretq_u64_f64(a: float64x2_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20962,7 +20962,7 @@ pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20980,7 +20980,7 @@ pub fn vreinterpretq_p8_f64(a: float64x2_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20990,7 +20990,7 @@ pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21004,7 +21004,7 @@ pub fn vreinterpretq_p16_f64(a: float64x2_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21014,7 +21014,7 @@ pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { } #[doc = "Vector 
reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21028,7 +21028,7 @@ pub fn vreinterpretq_p64_f64(a: float64x2_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21038,7 +21038,7 @@ pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21051,7 +21051,7 @@ pub fn vreinterpret_f64_s8(a: int8x8_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21061,7 +21061,7 @@ pub fn vreinterpretq_f64_s8(a: int8x16_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21076,7 +21076,7 @@ pub fn vreinterpretq_f64_s8(a: int8x16_t) -> 
float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21086,7 +21086,7 @@ pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21099,7 +21099,7 @@ pub fn vreinterpret_f64_s16(a: int16x4_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21109,7 +21109,7 @@ pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21123,7 +21123,7 @@ pub fn vreinterpretq_f64_s16(a: int16x8_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21133,7 +21133,7 @@ pub fn 
vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21146,7 +21146,7 @@ pub fn vreinterpret_f64_s32(a: int32x2_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21156,7 +21156,7 @@ pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21170,7 +21170,7 @@ pub fn vreinterpretq_f64_s32(a: int32x4_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21179,7 +21179,7 @@ pub fn vreinterpret_f64_s64(a: int64x1_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, 
assert_instr(nop))] @@ -21188,7 +21188,7 @@ pub fn vreinterpret_p64_s64(a: int64x1_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21198,7 +21198,7 @@ pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21212,7 +21212,7 @@ pub fn vreinterpretq_f64_s64(a: int64x2_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21222,7 +21222,7 @@ pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21236,7 +21236,7 @@ pub fn vreinterpretq_p64_s64(a: int64x2_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = 
"neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21246,7 +21246,7 @@ pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21259,7 +21259,7 @@ pub fn vreinterpret_f64_u8(a: uint8x8_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21269,7 +21269,7 @@ pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21284,7 +21284,7 @@ pub fn vreinterpretq_f64_u8(a: uint8x16_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21294,7 +21294,7 @@ pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = 
"big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21307,7 +21307,7 @@ pub fn vreinterpret_f64_u16(a: uint16x4_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21317,7 +21317,7 @@ pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21331,7 +21331,7 @@ pub fn vreinterpretq_f64_u16(a: uint16x8_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21341,7 +21341,7 @@ pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21354,7 +21354,7 @@ pub fn vreinterpret_f64_u32(a: uint32x2_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21364,7 +21364,7 @@ pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21378,7 +21378,7 @@ pub fn vreinterpretq_f64_u32(a: uint32x4_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21387,7 +21387,7 @@ pub fn vreinterpret_f64_u64(a: uint64x1_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21396,7 +21396,7 @@ pub fn vreinterpret_p64_u64(a: uint64x1_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21406,7 +21406,7 @@ pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21420,7 +21420,7 @@ pub fn vreinterpretq_f64_u64(a: uint64x2_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21430,7 +21430,7 @@ pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21444,7 +21444,7 @@ pub fn vreinterpretq_p64_u64(a: uint64x2_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21454,7 +21454,7 @@ pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21467,7 +21467,7 @@ pub fn vreinterpret_f64_p8(a: poly8x8_t) -> float64x1_t { } #[doc = "Vector 
reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21477,7 +21477,7 @@ pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21492,7 +21492,7 @@ pub fn vreinterpretq_f64_p8(a: poly8x16_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21502,7 +21502,7 @@ pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21515,7 +21515,7 @@ pub fn vreinterpret_f64_p16(a: poly16x4_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21525,7 +21525,7 @@ pub fn vreinterpretq_f64_p16(a: 
poly16x8_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21539,7 +21539,7 @@ pub fn vreinterpretq_f64_p16(a: poly16x8_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21549,7 +21549,7 @@ pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21562,7 +21562,7 @@ pub fn vreinterpret_f32_p64(a: poly64x1_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f64_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21571,7 +21571,7 @@ pub fn vreinterpret_f64_p64(a: poly64x1_t) -> float64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ 
-21580,7 +21580,7 @@ pub fn vreinterpret_s64_p64(a: poly64x1_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -21589,7 +21589,7 @@ pub fn vreinterpret_u64_p64(a: poly64x1_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21599,7 +21599,7 @@ pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21613,7 +21613,7 @@ pub fn vreinterpretq_f32_p64(a: poly64x2_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21623,7 +21623,7 @@ pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f64_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21637,7 +21637,7 @@ pub fn vreinterpretq_f64_p64(a: poly64x2_t) -> float64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21647,7 +21647,7 @@ pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21661,7 +21661,7 @@ pub fn vreinterpretq_s64_p64(a: poly64x2_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21671,7 +21671,7 @@ pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21685,7 +21685,7 @@ pub fn vreinterpretq_u64_p64(a: poly64x2_t) -> uint64x2_t { } #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] @@ -21701,7 +21701,7 @@ pub fn vrnd32x_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] @@ -21717,7 +21717,7 @@ pub fn vrnd32xq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32xq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] @@ -21733,7 +21733,7 @@ pub fn vrnd32xq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to 32-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32x_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32x))] @@ -21749,7 +21749,7 @@ pub fn vrnd32x_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = 
"117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] @@ -21765,7 +21765,7 @@ pub fn vrnd32z_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] @@ -21781,7 +21781,7 @@ pub fn vrnd32zq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32zq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] @@ -21797,7 +21797,7 @@ pub fn vrnd32zq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to 32-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd32z_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint32z))] @@ -21813,7 +21813,7 @@ pub fn vrnd32z_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] @@ -21829,7 +21829,7 @@ pub fn vrnd64x_f32(a: 
float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] @@ -21845,7 +21845,7 @@ pub fn vrnd64xq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64xq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] @@ -21861,7 +21861,7 @@ pub fn vrnd64xq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to 64-bit integer, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64x_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64x))] @@ -21877,7 +21877,7 @@ pub fn vrnd64x_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] @@ -21893,7 +21893,7 @@ pub fn vrnd64z_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] @@ -21909,7 +21909,7 @@ pub fn vrnd64zq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64zq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] @@ -21925,7 +21925,7 @@ pub fn vrnd64zq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to 64-bit integer toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd64z_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,frintts")] #[unstable(feature = "stdarch_neon_ftts", issue = "117227")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(frint64z))] @@ -21941,7 +21941,7 @@ pub fn vrnd64z_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -21951,7 +21951,7 @@ pub fn vrnd_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = 
"stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -21961,7 +21961,7 @@ pub fn vrndq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] @@ -21970,7 +21970,7 @@ pub fn vrnd_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] @@ -21979,7 +21979,7 @@ pub fn vrndq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] @@ -21988,7 +21988,7 @@ pub fn vrnd_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to integral, toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintz))] @@ -21997,7 +21997,7 @@ pub fn vrndq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f16)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22007,7 +22007,7 @@ pub fn vrnda_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22017,7 +22017,7 @@ pub fn vrndaq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] @@ -22026,7 +22026,7 @@ pub fn vrnda_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] @@ -22035,7 +22035,7 @@ pub fn vrndaq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrnda_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] @@ -22044,7 +22044,7 @@ pub fn vrnda_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndaq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinta))] @@ -22053,7 +22053,7 @@ pub fn vrndaq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndah_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -22063,7 +22063,7 @@ pub fn vrndah_f16(a: f16) -> f16 { } #[doc = "Floating-point round to integral, to nearest with ties to away"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -22073,7 +22073,7 @@ pub fn vrndh_f16(a: f16) -> f16 { } #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22090,7 +22090,7 @@ pub fn vrndi_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22107,7 +22107,7 @@ pub fn vrndiq_f16(a: 
float16x8_t) -> float16x8_t { } #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] @@ -22123,7 +22123,7 @@ pub fn vrndi_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] @@ -22139,7 +22139,7 @@ pub fn vrndiq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndi_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] @@ -22155,7 +22155,7 @@ pub fn vrndi_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndiq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frinti))] @@ -22171,7 +22171,7 @@ pub fn vrndiq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndih_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] 
#[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -22188,7 +22188,7 @@ pub fn vrndih_f16(a: f16) -> f16 { } #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22198,7 +22198,7 @@ pub fn vrndm_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22208,7 +22208,7 @@ pub fn vrndmq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] @@ -22217,7 +22217,7 @@ pub fn vrndm_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] @@ -22226,7 +22226,7 @@ pub fn vrndmq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndm_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] @@ -22235,7 +22235,7 @@ pub fn vrndm_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintm))] @@ -22244,7 +22244,7 @@ pub fn vrndmq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndmh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -22254,7 +22254,7 @@ pub fn vrndmh_f16(a: f16) -> f16 { } #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] @@ -22270,7 +22270,7 @@ pub fn vrndn_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] @@ -22286,7 +22286,7 @@ pub fn vrndnq_f64(a: float64x2_t) -> 
float64x2_t { } #[doc = "Floating-point round to integral, toward minus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -22303,7 +22303,7 @@ pub fn vrndnh_f16(a: f16) -> f16 { } #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndns_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintn))] @@ -22319,7 +22319,7 @@ pub fn vrndns_f32(a: f32) -> f32 { } #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22329,7 +22329,7 @@ pub fn vrndp_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22339,7 +22339,7 @@ pub fn vrndpq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, 
assert_instr(frintp))] @@ -22348,7 +22348,7 @@ pub fn vrndp_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] @@ -22357,7 +22357,7 @@ pub fn vrndpq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndp_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] @@ -22366,7 +22366,7 @@ pub fn vrndp_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndpq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintp))] @@ -22375,7 +22375,7 @@ pub fn vrndpq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to integral, toward plus infinity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndph_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -22385,7 +22385,7 @@ pub fn vrndph_f16(a: f16) -> f16 { } #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f16)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22395,7 +22395,7 @@ pub fn vrndx_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -22405,7 +22405,7 @@ pub fn vrndxq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] @@ -22414,7 +22414,7 @@ pub fn vrndx_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] @@ -22423,7 +22423,7 @@ pub fn vrndxq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Floating-point round to integral exact, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndx_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] @@ -22432,7 +22432,7 @@ pub fn vrndx_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Floating-point round to integral exact, using current 
rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(frintx))] @@ -22441,7 +22441,7 @@ pub fn vrndxq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndxh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -22451,7 +22451,7 @@ pub fn vrndxh_f16(a: f16) -> f16 { } #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(srshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22467,7 +22467,7 @@ pub fn vrshld_s64(a: i64, b: i64) -> i64 { } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshld_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(urshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22483,7 +22483,7 @@ pub fn vrshld_u64(a: u64, b: i64) -> u64 { } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(1)] @@ -22494,7 +22494,7 @@ pub fn vrshrd_n_s64(a: i64) -> i64 { } #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrd_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(1)] @@ -22505,7 +22505,7 @@ pub fn vrshrd_n_u64(a: u64) -> u64 { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -22522,7 +22522,7 @@ pub fn vrshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -22533,7 +22533,7 @@ pub fn vrshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -22544,7 +22544,7 @@ pub fn vrshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -22561,7 +22561,7 @@ pub fn vrshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_ } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -22572,7 +22572,7 @@ pub fn vrshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8 } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_high_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(rshrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -22583,7 +22583,7 @@ pub fn vrshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4 } #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22599,7 +22599,7 @@ pub fn vrsqrte_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22615,7 +22615,7 @@ pub fn vrsqrteq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrted_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22631,7 +22631,7 @@ pub fn vrsqrted_f64(a: f64) -> f64 { } #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtes_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrte))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22647,7 +22647,7 @@ pub fn vrsqrtes_f32(a: f32) -> f32 { } #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(frsqrte))] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -22664,7 +22664,7 @@ pub fn vrsqrteh_f16(a: f16) -> f16 { } #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22680,7 +22680,7 @@ pub fn vrsqrts_f64(a: float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22696,7 +22696,7 @@ pub fn vrsqrtsq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsd_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22712,7 +22712,7 @@ pub fn vrsqrtsd_f64(a: f64, b: f64) -> f64 { } #[doc = "Floating-point 
reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtss_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(frsqrts))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -22728,7 +22728,7 @@ pub fn vrsqrtss_f32(a: f32, b: f32) -> f32 { } #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(test, assert_instr(frsqrts))] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] @@ -22745,7 +22745,7 @@ pub fn vrsqrtsh_f16(a: f16, b: f16) -> f16 { } #[doc = "Signed rounding shift right and accumulate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(srshr, N = 2))] #[rustc_legacy_const_generics(2)] @@ -22757,7 +22757,7 @@ pub fn vrsrad_n_s64(a: i64, b: i64) -> i64 { } #[doc = "Unsigned rounding shift right and accumulate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsrad_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(urshr, N = 2))] #[rustc_legacy_const_generics(2)] @@ -22769,7 +22769,7 @@ pub fn vrsrad_n_u64(a: u64, b: u64) -> u64 { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "little")] #[cfg_attr(test, assert_instr(rsubhn2))] @@ -22780,7 +22780,7 @@ pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { } #[doc = "Rounding 
subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "little")] #[cfg_attr(test, assert_instr(rsubhn2))] @@ -22791,7 +22791,7 @@ pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "little")] #[cfg_attr(test, assert_instr(rsubhn2))] @@ -22802,7 +22802,7 @@ pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "little")] #[cfg_attr(test, assert_instr(rsubhn2))] @@ -22813,7 +22813,7 @@ pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_ } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "little")] #[cfg_attr(test, assert_instr(rsubhn2))] @@ -22824,7 +22824,7 @@ pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8 } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "little")] #[cfg_attr(test, assert_instr(rsubhn2))] @@ -22835,7 +22835,7 @@ pub fn 
vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4 } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "big")] #[cfg_attr(test, assert_instr(rsubhn))] @@ -22846,7 +22846,7 @@ pub fn vrsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "big")] #[cfg_attr(test, assert_instr(rsubhn))] @@ -22857,7 +22857,7 @@ pub fn vrsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "big")] #[cfg_attr(test, assert_instr(rsubhn))] @@ -22868,7 +22868,7 @@ pub fn vrsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "big")] #[cfg_attr(test, assert_instr(rsubhn))] @@ -22879,7 +22879,7 @@ pub fn vrsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_ } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "big")] 
#[cfg_attr(test, assert_instr(rsubhn))] @@ -22890,7 +22890,7 @@ pub fn vrsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8 } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_high_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_endian = "big")] #[cfg_attr(test, assert_instr(rsubhn))] @@ -22901,7 +22901,7 @@ pub fn vrsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4 } #[doc = "Multi-vector floating-point adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f16)"] -#[inline(always)] +#[inline] #[unstable(feature = "stdarch_neon_fp8", issue = "none")] #[target_feature(enable = "neon,fp8")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))] @@ -22917,7 +22917,7 @@ pub fn vscale_f16(vn: float16x4_t, vm: int16x4_t) -> float16x4_t { } #[doc = "Multi-vector floating-point adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f16)"] -#[inline(always)] +#[inline] #[unstable(feature = "stdarch_neon_fp8", issue = "none")] #[target_feature(enable = "neon,fp8")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))] @@ -22933,7 +22933,7 @@ pub fn vscaleq_f16(vn: float16x8_t, vm: int16x8_t) -> float16x8_t { } #[doc = "Multi-vector floating-point adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscale_f32)"] -#[inline(always)] +#[inline] #[unstable(feature = "stdarch_neon_fp8", issue = "none")] #[target_feature(enable = "neon,fp8")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))] @@ -22949,7 +22949,7 @@ pub fn vscale_f32(vn: float32x2_t, vm: int32x2_t) -> float32x2_t { } #[doc = "Multi-vector floating-point adjust exponent"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f32)"] -#[inline(always)] +#[inline] #[unstable(feature = "stdarch_neon_fp8", issue = "none")] #[target_feature(enable = "neon,fp8")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))] @@ -22965,7 +22965,7 @@ pub fn vscaleq_f32(vn: float32x4_t, vm: int32x4_t) -> float32x4_t { } #[doc = "Multi-vector floating-point adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vscaleq_f64)"] -#[inline(always)] +#[inline] #[unstable(feature = "stdarch_neon_fp8", issue = "none")] #[target_feature(enable = "neon,fp8")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(fscale))] @@ -22981,7 +22981,7 @@ pub fn vscaleq_f64(vn: float64x2_t, vm: int64x2_t) -> float64x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -22992,7 +22992,7 @@ pub fn vset_lane_f64(a: f64, b: float64x1_t) -> float64x1_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -23003,7 +23003,7 @@ pub fn vsetq_lane_f64(a: f64, b: float64x2_t) -> float64x2_t { } #[doc = "SHA512 hash update part 2"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512h2q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(sha512h2))] #[stable(feature = "stdarch_neon_sha3", since = 
"1.79.0")] @@ -23019,7 +23019,7 @@ pub fn vsha512h2q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t } #[doc = "SHA512 hash update part 1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512hq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(sha512h))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] @@ -23035,7 +23035,7 @@ pub fn vsha512hq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t } #[doc = "SHA512 schedule update 0"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su0q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(sha512su0))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] @@ -23051,7 +23051,7 @@ pub fn vsha512su0q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "SHA512 schedule update 1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha512su1q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(sha512su1))] #[stable(feature = "stdarch_neon_sha3", since = "1.79.0")] @@ -23067,7 +23067,7 @@ pub fn vsha512su1q_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_ } #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23076,7 +23076,7 @@ pub fn vshld_s64(a: i64, b: i64) -> i64 { } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshld_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, 
assert_instr(ushl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23085,7 +23085,7 @@ pub fn vshld_u64(a: u64, b: i64) -> u64 { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshll2, N = 2))] #[rustc_legacy_const_generics(1)] @@ -23099,7 +23099,7 @@ pub fn vshll_high_n_s8(a: int8x16_t) -> int16x8_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshll2, N = 2))] #[rustc_legacy_const_generics(1)] @@ -23113,7 +23113,7 @@ pub fn vshll_high_n_s16(a: int16x8_t) -> int32x4_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sshll2, N = 2))] #[rustc_legacy_const_generics(1)] @@ -23127,7 +23127,7 @@ pub fn vshll_high_n_s32(a: int32x4_t) -> int64x2_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ushll2, N = 2))] #[rustc_legacy_const_generics(1)] @@ -23141,7 +23141,7 @@ pub fn vshll_high_n_u8(a: uint8x16_t) -> uint16x8_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ushll2, N = 2))] #[rustc_legacy_const_generics(1)] @@ -23155,7 +23155,7 @@ pub fn 
vshll_high_n_u16(a: uint16x8_t) -> uint32x4_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_high_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(ushll2, N = 2))] #[rustc_legacy_const_generics(1)] @@ -23169,7 +23169,7 @@ pub fn vshll_high_n_u32(a: uint32x4_t) -> uint64x2_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -23186,7 +23186,7 @@ pub fn vshrn_high_n_s16(a: int8x8_t, b: int16x8_t) -> int8x16_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -23197,7 +23197,7 @@ pub fn vshrn_high_n_s32(a: int16x4_t, b: int32x4_t) -> int16x8_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -23208,7 +23208,7 @@ pub fn vshrn_high_n_s64(a: int32x2_t, b: int64x2_t) -> int32x4_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -23225,7 +23225,7 @@ pub fn vshrn_high_n_u16(a: uint8x8_t, b: uint16x8_t) -> uint8x16_t } #[doc = "Shift right 
narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -23236,7 +23236,7 @@ pub fn vshrn_high_n_u32(a: uint16x4_t, b: uint32x4_t) -> uint16x8_ } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_high_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(shrn2, N = 2))] #[rustc_legacy_const_generics(2)] @@ -23247,7 +23247,7 @@ pub fn vshrn_high_n_u64(a: uint32x2_t, b: uint64x2_t) -> uint32x4_ } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23265,7 +23265,7 @@ pub fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23283,7 +23283,7 @@ pub fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23301,7 +23301,7 @@ pub fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23319,7 +23319,7 @@ pub fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23337,7 +23337,7 @@ pub fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23355,7 +23355,7 @@ pub fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23373,7 +23373,7 @@ pub fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23391,7 +23391,7 @@ pub fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23402,7 +23402,7 @@ pub fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23413,7 +23413,7 @@ pub fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23424,7 +23424,7 @@ pub fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23435,7 +23435,7 @@ pub fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23446,7 +23446,7 @@ pub fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23457,7 +23457,7 @@ pub fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23468,7 +23468,7 @@ pub fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23479,7 +23479,7 @@ pub fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23490,7 +23490,7 @@ pub fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23501,7 +23501,7 @@ pub fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23512,7 +23512,7 @@ pub fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23523,7 +23523,7 @@ pub fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23534,7 +23534,7 @@ pub fn vsli_n_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(sli, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23545,7 +23545,7 @@ pub fn vsliq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { } #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[rustc_legacy_const_generics(2)] @@ -23556,7 +23556,7 @@ pub fn vslid_n_s64(a: i64, b: i64) -> i64 { } #[doc = "Shift left and insert"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vslid_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[rustc_legacy_const_generics(2)] @@ -23567,7 +23567,7 @@ pub fn vslid_n_u64(a: u64, b: u64) -> u64 { } #[doc = "SM3PARTW1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw1q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3partw1))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] @@ -23583,7 +23583,7 @@ pub fn vsm3partw1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_ } #[doc = "SM3PARTW2"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3partw2q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3partw2))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] @@ -23599,7 +23599,7 @@ pub fn vsm3partw2q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_ } #[doc = "SM3SS1"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3ss1q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3ss1))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] @@ -23615,7 +23615,7 @@ pub fn vsm3ss1q_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { } #[doc = "SM3TT1A"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1aq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3tt1a, IMM2 = 0))] #[rustc_legacy_const_generics(3)] @@ -23633,7 +23633,7 @@ pub fn vsm3tt1aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_ } #[doc = "SM3TT1B"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt1bq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3tt1b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] @@ -23651,7 +23651,7 @@ pub fn vsm3tt1bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_ } #[doc = "SM3TT2A"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2aq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3tt2a, IMM2 = 0))] #[rustc_legacy_const_generics(3)] @@ -23669,7 +23669,7 @@ pub fn vsm3tt2aq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_ } #[doc = "SM3TT2B"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm3tt2bq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm3tt2b, IMM2 = 0))] #[rustc_legacy_const_generics(3)] @@ -23687,7 +23687,7 @@ pub fn vsm3tt2bq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_ } #[doc = "SM4 key"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4ekeyq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm4ekey))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] @@ -23703,7 +23703,7 @@ pub fn vsm4ekeyq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "SM4 encode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsm4eq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sm4")] #[cfg_attr(test, assert_instr(sm4e))] #[unstable(feature = "stdarch_neon_sm4", issue = "117226")] @@ -23719,7 +23719,7 @@ pub fn vsm4eq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] @@ -23735,7 +23735,7 @@ pub fn vsqadd_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] @@ -23751,7 +23751,7 @@ pub fn vsqaddq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] @@ -23767,7 +23767,7 @@ pub fn vsqadd_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] @@ -23783,7 +23783,7 @@ pub fn vsqaddq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] @@ -23799,7 +23799,7 @@ pub fn vsqadd_u32(a: 
uint32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] @@ -23815,7 +23815,7 @@ pub fn vsqaddq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] @@ -23831,7 +23831,7 @@ pub fn vsqadd_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Unsigned saturating Accumulate of Signed value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usqadd))] @@ -23847,7 +23847,7 @@ pub fn vsqaddq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddb_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23856,7 +23856,7 @@ pub fn vsqaddb_u8(a: u8, b: i8) -> u8 { } #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddh_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23865,7 +23865,7 @@ pub fn vsqaddh_u16(a: u16, b: i16) -> u16 { } #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqaddd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23881,7 +23881,7 @@ pub fn vsqaddd_u64(a: u64, b: i64) -> u64 { } #[doc = "Unsigned saturating accumulate of signed value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqadds_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(usqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23897,7 +23897,7 @@ pub fn vsqadds_u32(a: u32, b: i32) -> u32 { } #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fsqrt))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -23907,7 +23907,7 @@ pub fn vsqrt_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(test, assert_instr(fsqrt))] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] @@ -23917,7 +23917,7 @@ pub fn vsqrtq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, 
assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23926,7 +23926,7 @@ pub fn vsqrt_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23935,7 +23935,7 @@ pub fn vsqrtq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrt_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23944,7 +23944,7 @@ pub fn vsqrt_f64(a: float64x1_t) -> float64x1_t { } #[doc = "Calculates the square root of each lane."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrtq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(fsqrt))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -23953,7 +23953,7 @@ pub fn vsqrtq_f64(a: float64x2_t) -> float64x2_t { } #[doc = "Floating-point round to integral, using current rounding mode"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsqrth_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -23963,7 +23963,7 @@ pub fn vsqrth_f16(a: f16) -> f16 { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23974,7 +23974,7 @@ pub fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23985,7 +23985,7 @@ pub fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -23996,7 +23996,7 @@ pub fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24007,7 +24007,7 @@ pub fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24018,7 +24018,7 @@ pub fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] 
#[rustc_legacy_const_generics(2)] @@ -24029,7 +24029,7 @@ pub fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24040,7 +24040,7 @@ pub fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24051,7 +24051,7 @@ pub fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24062,7 +24062,7 @@ pub fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24073,7 +24073,7 @@ pub fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24084,7 
+24084,7 @@ pub fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24095,7 +24095,7 @@ pub fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24106,7 +24106,7 @@ pub fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24117,7 +24117,7 @@ pub fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24128,7 +24128,7 @@ pub fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24139,7 +24139,7 @@ pub fn vsriq_n_u64(a: 
uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24150,7 +24150,7 @@ pub fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24161,7 +24161,7 @@ pub fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24172,7 +24172,7 @@ pub fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24183,7 +24183,7 @@ pub fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24194,7 +24194,7 @@ pub fn vsri_n_p64(a: poly64x1_t, b: poly64x1_t) -> 
poly64x1_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(sri, N = 1))] #[rustc_legacy_const_generics(2)] @@ -24205,7 +24205,7 @@ pub fn vsriq_n_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[rustc_legacy_const_generics(2)] @@ -24216,7 +24216,7 @@ pub fn vsrid_n_s64(a: i64, b: i64) -> i64 { } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsrid_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[rustc_legacy_const_generics(2)] @@ -24229,7 +24229,7 @@ pub fn vsrid_n_u64(a: u64, b: u64) -> u64 { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24242,7 +24242,7 @@ pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24255,7 +24255,7 @@ pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24267,7 +24267,7 @@ pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24279,7 +24279,7 @@ pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24291,7 +24291,7 @@ pub unsafe fn vst1_f64(ptr: *mut f64, a: float64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24303,7 +24303,7 @@ pub unsafe fn vst1q_f64(ptr: *mut f64, a: float64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24315,7 +24315,7 @@ pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24327,7 +24327,7 @@ pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24339,7 +24339,7 @@ pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24351,7 +24351,7 @@ pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24363,7 +24363,7 @@ pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24375,7 +24375,7 @@ pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24387,7 +24387,7 @@ pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24399,7 +24399,7 @@ pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24411,7 +24411,7 @@ pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24423,7 +24423,7 @@ pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24435,7 +24435,7 @@ pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24447,7 +24447,7 @@ pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24459,7 +24459,7 @@ pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24471,7 +24471,7 @@ pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24483,7 +24483,7 @@ pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24495,7 +24495,7 @@ pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24507,7 +24507,7 @@ pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24519,7 +24519,7 @@ pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24531,7 +24531,7 @@ pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24543,7 +24543,7 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24555,7 +24555,7 @@ pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(str))] #[allow(clippy::cast_ptr_alignment)] @@ -24567,7 +24567,7 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24585,7 +24585,7 @@ pub unsafe fn vst1_f64_x2(a: *mut f64, b: float64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24603,7 +24603,7 @@ pub unsafe fn vst1q_f64_x2(a: *mut f64, b: float64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24621,7 +24621,7 @@ pub unsafe fn vst1_f64_x3(a: *mut f64, b: float64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24639,7 +24639,7 @@ pub unsafe fn 
vst1q_f64_x3(a: *mut f64, b: float64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24663,7 +24663,7 @@ pub unsafe fn vst1_f64_x4(a: *mut f64, b: float64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st1))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24687,7 +24687,7 @@ pub unsafe fn vst1q_f64_x4(a: *mut f64, b: float64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24700,7 +24700,7 @@ pub unsafe fn vst1_lane_f64(a: *mut f64, b: float64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(nop, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24713,7 +24713,7 @@ pub unsafe fn vst1q_lane_f64(a: *mut f64, b: float64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, 
assert_instr(stp))] @@ -24724,7 +24724,7 @@ pub unsafe fn vst2_f64(a: *mut f64, b: float64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24744,7 +24744,7 @@ pub unsafe fn vst2_lane_f64(a: *mut f64, b: float64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24764,7 +24764,7 @@ pub unsafe fn vst2_lane_s64(a: *mut i64, b: int64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24777,7 +24777,7 @@ pub unsafe fn vst2_lane_p64(a: *mut p64, b: poly64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24790,7 +24790,7 @@ pub unsafe fn vst2_lane_u64(a: *mut u64, b: uint64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] #[cfg_attr(test, assert_instr(st2))] @@ -24801,7 +24801,7 @@ pub unsafe fn vst2q_f64(a: *mut f64, b: float64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] @@ -24812,7 +24812,7 @@ pub unsafe fn vst2q_s64(a: *mut i64, b: int64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24832,7 +24832,7 @@ pub unsafe fn vst2q_lane_f64(a: *mut f64, b: float64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24852,7 +24852,7 @@ pub unsafe fn vst2q_lane_s8(a: *mut i8, b: int8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24872,7 +24872,7 @@ pub unsafe fn vst2q_lane_s64(a: *mut i64, b: int64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] 
#[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24885,7 +24885,7 @@ pub unsafe fn vst2q_lane_p64(a: *mut p64, b: poly64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24898,7 +24898,7 @@ pub unsafe fn vst2q_lane_u8(a: *mut u8, b: uint8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24911,7 +24911,7 @@ pub unsafe fn vst2q_lane_u64(a: *mut u64, b: uint64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st2, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24924,7 +24924,7 @@ pub unsafe fn vst2q_lane_p8(a: *mut p8, b: poly8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st2))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24935,7 +24935,7 @@ pub unsafe fn vst2q_p64(a: *mut p64, b: poly64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st2))] @@ -24946,7 +24946,7 @@ pub unsafe fn vst2q_u64(a: *mut u64, b: uint64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -24957,7 +24957,7 @@ pub unsafe fn vst3_f64(a: *mut f64, b: float64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24977,7 +24977,7 @@ pub unsafe fn vst3_lane_f64(a: *mut f64, b: float64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -24997,7 +24997,7 @@ pub unsafe fn vst3_lane_s64(a: *mut i64, b: int64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] @@ -25010,7 +25010,7 @@ pub unsafe fn vst3_lane_p64(a: *mut p64, b: poly64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u64)"] #[doc = "## Safety"] #[doc = " 
* Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] @@ -25023,7 +25023,7 @@ pub unsafe fn vst3_lane_u64(a: *mut u64, b: uint64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] @@ -25034,7 +25034,7 @@ pub unsafe fn vst3q_f64(a: *mut f64, b: float64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] @@ -25045,7 +25045,7 @@ pub unsafe fn vst3q_s64(a: *mut i64, b: int64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25065,7 +25065,7 @@ pub unsafe fn vst3q_lane_f64(a: *mut f64, b: float64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25085,7 +25085,7 @@ pub unsafe fn vst3q_lane_s8(a: *mut i8, b: int8x16x3_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25105,7 +25105,7 @@ pub unsafe fn vst3q_lane_s64(a: *mut i64, b: int64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] @@ -25118,7 +25118,7 @@ pub unsafe fn vst3q_lane_p64(a: *mut p64, b: poly64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] @@ -25131,7 +25131,7 @@ pub unsafe fn vst3q_lane_u8(a: *mut u8, b: uint8x16x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3, LANE = 0))] @@ -25144,7 +25144,7 @@ pub unsafe fn vst3q_lane_u64(a: *mut u64, b: uint64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, 
assert_instr(st3, LANE = 0))] @@ -25157,7 +25157,7 @@ pub unsafe fn vst3q_lane_p8(a: *mut p8, b: poly8x16x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st3))] @@ -25168,7 +25168,7 @@ pub unsafe fn vst3q_p64(a: *mut p64, b: poly64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st3))] @@ -25179,7 +25179,7 @@ pub unsafe fn vst3q_u64(a: *mut u64, b: uint64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(nop))] @@ -25190,7 +25190,7 @@ pub unsafe fn vst4_f64(a: *mut f64, b: float64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25217,7 +25217,7 @@ pub unsafe fn vst4_lane_f64(a: *mut f64, b: float64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, 
assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25244,7 +25244,7 @@ pub unsafe fn vst4_lane_s64(a: *mut i64, b: int64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] @@ -25257,7 +25257,7 @@ pub unsafe fn vst4_lane_p64(a: *mut p64, b: poly64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] @@ -25270,7 +25270,7 @@ pub unsafe fn vst4_lane_u64(a: *mut u64, b: uint64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] @@ -25281,7 +25281,7 @@ pub unsafe fn vst4q_f64(a: *mut f64, b: float64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] @@ -25292,7 +25292,7 @@ pub unsafe fn vst4q_s64(a: *mut i64, b: int64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25319,7 +25319,7 @@ pub unsafe fn vst4q_lane_f64(a: *mut f64, b: float64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25346,7 +25346,7 @@ pub unsafe fn vst4q_lane_s8(a: *mut i8, b: int8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25373,7 +25373,7 @@ pub unsafe fn vst4q_lane_s64(a: *mut i64, b: int64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] @@ -25386,7 +25386,7 @@ pub unsafe fn vst4q_lane_p64(a: *mut p64, b: poly64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] @@ -25399,7 +25399,7 @@ pub unsafe fn vst4q_lane_u8(a: *mut u8, b: uint8x16x4_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] @@ -25412,7 +25412,7 @@ pub unsafe fn vst4q_lane_u64(a: *mut u64, b: uint64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4, LANE = 0))] @@ -25425,7 +25425,7 @@ pub unsafe fn vst4q_lane_p8(a: *mut p8, b: poly8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[target_feature(enable = "neon,aes")] #[cfg_attr(test, assert_instr(st4))] @@ -25436,7 +25436,7 @@ pub unsafe fn vst4q_p64(a: *mut p64, b: poly64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(st4))] @@ -25447,7 +25447,7 @@ pub unsafe fn vst4q_u64(a: *mut u64, b: uint64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_f64)"] #[doc = "## Safety"] #[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = 
"msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25461,7 +25461,7 @@ pub unsafe fn vstl1_lane_f64(ptr: *mut f64, val: float64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_f64)"] #[doc = "## Safety"] #[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25475,7 +25475,7 @@ pub unsafe fn vstl1q_lane_f64(ptr: *mut f64, val: float64x2_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_u64)"] #[doc = "## Safety"] #[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25489,7 +25489,7 @@ pub unsafe fn vstl1_lane_u64(ptr: *mut u64, val: uint64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_u64)"] #[doc = "## Safety"] #[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25503,7 +25503,7 @@ pub unsafe fn vstl1q_lane_u64(ptr: *mut u64, val: uint64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_p64)"] #[doc = "## Safety"] #[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = 
"msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25517,7 +25517,7 @@ pub unsafe fn vstl1_lane_p64(ptr: *mut p64, val: poly64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_p64)"] #[doc = "## Safety"] #[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25531,7 +25531,7 @@ pub unsafe fn vstl1q_lane_p64(ptr: *mut p64, val: poly64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1_lane_s64)"] #[doc = "## Safety"] #[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25547,7 +25547,7 @@ pub unsafe fn vstl1_lane_s64(ptr: *mut i64, val: int64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstl1q_lane_s64)"] #[doc = "## Safety"] #[doc = " * The pointer in `ptr` must satisfy the requirements of [`core::ptr::write`]."] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,rcpc3")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(stl1, LANE = 0))] #[rustc_legacy_const_generics(2)] @@ -25561,7 +25561,7 @@ pub unsafe fn vstl1q_lane_s64(ptr: *mut i64, val: int64x2_t) { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fsub))] @@ -25570,7 +25570,7 @@ pub fn vsub_f64(a: 
float64x1_t, b: float64x1_t) -> float64x1_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(fsub))] @@ -25579,7 +25579,7 @@ pub fn vsubq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sub))] @@ -25588,7 +25588,7 @@ pub fn vsubd_s64(a: i64, b: i64) -> i64 { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(sub))] @@ -25597,7 +25597,7 @@ pub fn vsubd_u64(a: u64, b: u64) -> u64 { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubh_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[unstable(feature = "stdarch_neon_f16", issue = "136306")] #[cfg(not(target_arch = "arm64ec"))] @@ -25607,7 +25607,7 @@ pub fn vsubh_f16(a: f16, b: f16) -> f16 { } #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ssubl2))] @@ -25622,7 +25622,7 @@ pub fn vsubl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { } #[doc = "Signed Subtract Long"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ssubl2))] @@ -25637,7 +25637,7 @@ pub fn vsubl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { } #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ssubl2))] @@ -25652,7 +25652,7 @@ pub fn vsubl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { } #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usubl2))] @@ -25667,7 +25667,7 @@ pub fn vsubl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { } #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usubl2))] @@ -25682,7 +25682,7 @@ pub fn vsubl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { } #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usubl2))] @@ -25697,7 +25697,7 @@ pub fn vsubl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { } #[doc = "Signed Subtract 
Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ssubw2))] @@ -25709,7 +25709,7 @@ pub fn vsubw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { } #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ssubw2))] @@ -25721,7 +25721,7 @@ pub fn vsubw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { } #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(ssubw2))] @@ -25733,7 +25733,7 @@ pub fn vsubw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { } #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usubw2))] @@ -25745,7 +25745,7 @@ pub fn vsubw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { } #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usubw2))] @@ -25757,7 +25757,7 @@ pub fn vsubw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { } #[doc = 
"Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(usubw2))] @@ -25769,7 +25769,7 @@ pub fn vsubw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25782,7 +25782,7 @@ pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25791,7 +25791,7 @@ pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25800,7 +25800,7 @@ pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25809,7 +25809,7 @@ pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25818,7 +25818,7 @@ pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25827,7 +25827,7 @@ pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25840,7 +25840,7 @@ pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25853,7 +25853,7 @@ pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25866,7 +25866,7 @@ pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25876,7 +25876,7 @@ pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25886,7 +25886,7 @@ pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbl))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25896,7 +25896,7 @@ pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25915,7 +25915,7 @@ pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25934,7 +25934,7 @@ pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25953,7 +25953,7 @@ pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25962,7 +25962,7 @@ pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25971,7 +25971,7 @@ pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25980,7 +25980,7 @@ pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26004,7 +26004,7 @@ pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26023,7 +26023,7 @@ pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26042,7 +26042,7 @@ pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26058,7 +26058,7 @@ pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26074,7 +26074,7 @@ pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tbx))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26090,7 +26090,7 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { } #[doc = 
"Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -26100,7 +26100,7 @@ pub fn vtrn1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -26110,7 +26110,7 @@ pub fn vtrn1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26119,7 +26119,7 @@ pub fn vtrn1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26128,7 +26128,7 @@ pub fn vtrn1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26137,7 +26137,7 @@ 
pub fn vtrn1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26146,7 +26146,7 @@ pub fn vtrn1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26155,7 +26155,7 @@ pub fn vtrn1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26164,7 +26164,7 @@ pub fn vtrn1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26173,7 +26173,7 @@ pub fn vtrn1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since 
= "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26182,7 +26182,7 @@ pub fn vtrn1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26191,7 +26191,7 @@ pub fn vtrn1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26206,7 +26206,7 @@ pub fn vtrn1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26215,7 +26215,7 @@ pub fn vtrn1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26224,7 +26224,7 @@ pub fn vtrn1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_s32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26233,7 +26233,7 @@ pub fn vtrn1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26242,7 +26242,7 @@ pub fn vtrn1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26257,7 +26257,7 @@ pub fn vtrn1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26266,7 +26266,7 @@ pub fn vtrn1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26275,7 +26275,7 @@ pub fn vtrn1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Transpose vectors"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26284,7 +26284,7 @@ pub fn vtrn1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26293,7 +26293,7 @@ pub fn vtrn1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26308,7 +26308,7 @@ pub fn vtrn1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26317,7 +26317,7 @@ pub fn vtrn1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn1q_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn1))] @@ -26326,7 +26326,7 
@@ pub fn vtrn1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -26336,7 +26336,7 @@ pub fn vtrn2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -26346,7 +26346,7 @@ pub fn vtrn2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -26355,7 +26355,7 @@ pub fn vtrn2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -26364,7 +26364,7 @@ pub fn vtrn2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] 
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -26373,7 +26373,7 @@ pub fn vtrn2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -26382,7 +26382,7 @@ pub fn vtrn2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -26391,7 +26391,7 @@ pub fn vtrn2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -26400,7 +26400,7 @@ pub fn vtrn2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -26409,7 +26409,7 @@ pub fn vtrn2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_f32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26418,7 +26418,7 @@ pub fn vtrn2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26427,7 +26427,7 @@ pub fn vtrn2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26442,7 +26442,7 @@ pub fn vtrn2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26451,7 +26451,7 @@ pub fn vtrn2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26460,7 +26460,7 @@ pub fn vtrn2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26469,7 +26469,7 @@ pub fn vtrn2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26478,7 +26478,7 @@ pub fn vtrn2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26493,7 +26493,7 @@ pub fn vtrn2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26502,7 +26502,7 @@ pub fn vtrn2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26511,7 +26511,7 @@ pub fn 
vtrn2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26520,7 +26520,7 @@ pub fn vtrn2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26529,7 +26529,7 @@ pub fn vtrn2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26544,7 +26544,7 @@ pub fn vtrn2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26553,7 +26553,7 @@ pub fn vtrn2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Transpose vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn2q_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = 
"1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(trn2))] @@ -26562,7 +26562,7 @@ pub fn vtrn2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26575,7 +26575,7 @@ pub fn vtst_s64(a: int64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26588,7 +26588,7 @@ pub fn vtstq_s64(a: int64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26601,7 +26601,7 @@ pub fn vtst_p64(a: poly64x1_t, b: poly64x1_t) -> uint64x1_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26614,7 +26614,7 @@ pub fn vtstq_p64(a: poly64x2_t, b: poly64x2_t) -> uint64x2_t { } #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u64)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26627,7 +26627,7 @@ pub fn vtst_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(cmtst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26640,7 +26640,7 @@ pub fn vtstq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Compare bitwise test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26649,7 +26649,7 @@ pub fn vtstd_s64(a: i64, b: i64) -> u64 { } #[doc = "Compare bitwise test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(tst))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26658,7 +26658,7 @@ pub fn vtstd_u64(a: u64, b: u64) -> u64 { } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] @@ -26674,7 +26674,7 @@ pub fn vuqadd_s8(a: int8x8_t, b: uint8x8_t) -> int8x8_t { } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] @@ -26690,7 +26690,7 @@ pub fn vuqaddq_s8(a: int8x16_t, b: uint8x16_t) -> int8x16_t { } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] @@ -26706,7 +26706,7 @@ pub fn vuqadd_s16(a: int16x4_t, b: uint16x4_t) -> int16x4_t { } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] @@ -26722,7 +26722,7 @@ pub fn vuqaddq_s16(a: int16x8_t, b: uint16x8_t) -> int16x8_t { } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] @@ -26738,7 +26738,7 @@ pub fn vuqadd_s32(a: int32x2_t, b: uint32x2_t) -> int32x2_t { } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] @@ -26754,7 +26754,7 @@ pub fn vuqaddq_s32(a: 
int32x4_t, b: uint32x4_t) -> int32x4_t { } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] @@ -26770,7 +26770,7 @@ pub fn vuqadd_s64(a: int64x1_t, b: uint64x1_t) -> int64x1_t { } #[doc = "Signed saturating Accumulate of Unsigned value."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(test, assert_instr(suqadd))] @@ -26786,7 +26786,7 @@ pub fn vuqaddq_s64(a: int64x2_t, b: uint64x2_t) -> int64x2_t { } #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddb_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26795,7 +26795,7 @@ pub fn vuqaddb_s8(a: i8, b: u8) -> i8 { } #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26804,7 +26804,7 @@ pub fn vuqaddh_s16(a: i16, b: u16) -> i16 { } #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqaddd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] @@ -26820,7 +26820,7 @@ pub fn vuqaddd_s64(a: i64, b: u64) -> i64 { } #[doc = "Signed saturating accumulate of unsigned value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuqadds_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(test, assert_instr(suqadd))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -26836,7 +26836,7 @@ pub fn vuqadds_s32(a: i32, b: u32) -> i32 { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -26846,7 +26846,7 @@ pub fn vuzp1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -26856,7 +26856,7 @@ pub fn vuzp1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26865,7 +26865,7 @@ pub fn vuzp1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] 
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26874,7 +26874,7 @@ pub fn vuzp1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26883,7 +26883,7 @@ pub fn vuzp1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26892,7 +26892,7 @@ pub fn vuzp1q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26901,7 +26901,7 @@ pub fn vuzp1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26910,7 +26910,7 @@ pub fn vuzp1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p64)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -26919,7 +26919,7 @@ pub fn vuzp1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -26928,7 +26928,7 @@ pub fn vuzp1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -26937,7 +26937,7 @@ pub fn vuzp1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -26952,7 +26952,7 @@ pub fn vuzp1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -26961,7 +26961,7 @@ pub fn vuzp1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -26970,7 +26970,7 @@ pub fn vuzp1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -26979,7 +26979,7 @@ pub fn vuzp1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -26988,7 +26988,7 @@ pub fn vuzp1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -27003,7 +27003,7 @@ pub fn vuzp1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -27012,7 +27012,7 @@ pub fn vuzp1_u16(a: 
uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -27021,7 +27021,7 @@ pub fn vuzp1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -27030,7 +27030,7 @@ pub fn vuzp1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -27039,7 +27039,7 @@ pub fn vuzp1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -27054,7 +27054,7 @@ pub fn vuzp1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, 
not(target_env = "msvc")), assert_instr(uzp1))] @@ -27063,7 +27063,7 @@ pub fn vuzp1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp1q_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp1))] @@ -27072,7 +27072,7 @@ pub fn vuzp1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -27082,7 +27082,7 @@ pub fn vuzp2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -27092,7 +27092,7 @@ pub fn vuzp2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27101,7 +27101,7 @@ pub fn vuzp2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27110,7 +27110,7 @@ pub fn vuzp2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27119,7 +27119,7 @@ pub fn vuzp2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27128,7 +27128,7 @@ pub fn vuzp2q_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27137,7 +27137,7 @@ pub fn vuzp2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27146,7 +27146,7 @@ pub fn vuzp2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27155,7 +27155,7 @@ pub fn vuzp2q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27164,7 +27164,7 @@ pub fn vuzp2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27173,7 +27173,7 @@ pub fn vuzp2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27188,7 +27188,7 @@ pub fn vuzp2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27197,7 +27197,7 @@ pub fn vuzp2_s16(a: 
int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27206,7 +27206,7 @@ pub fn vuzp2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27215,7 +27215,7 @@ pub fn vuzp2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27224,7 +27224,7 @@ pub fn vuzp2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27239,7 +27239,7 @@ pub fn vuzp2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = 
"msvc")), assert_instr(uzp2))] @@ -27248,7 +27248,7 @@ pub fn vuzp2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27257,7 +27257,7 @@ pub fn vuzp2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27266,7 +27266,7 @@ pub fn vuzp2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27275,7 +27275,7 @@ pub fn vuzp2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27290,7 +27290,7 @@ pub fn vuzp2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27299,7 +27299,7 @@ pub fn vuzp2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp2q_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(uzp2))] @@ -27308,7 +27308,7 @@ pub fn vuzp2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Exclusive OR and rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vxarq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,sha3")] #[cfg_attr(test, assert_instr(xar, IMM6 = 0))] #[rustc_legacy_const_generics(2)] @@ -27326,7 +27326,7 @@ pub fn vxarq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -27336,7 +27336,7 @@ pub fn vzip1_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -27346,7 +27346,7 @@ pub fn vzip1q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27355,7 +27355,7 @@ pub fn vzip1_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27364,7 +27364,7 @@ pub fn vzip1q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27373,7 +27373,7 @@ pub fn vzip1q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27382,7 +27382,7 @@ pub fn vzip1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27397,7 +27397,7 @@ pub fn vzip1q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Zip vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27406,7 +27406,7 @@ pub fn vzip1_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27415,7 +27415,7 @@ pub fn vzip1q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27424,7 +27424,7 @@ pub fn vzip1_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27433,7 +27433,7 @@ pub fn vzip1q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27442,7 +27442,7 @@ pub fn vzip1q_s64(a: int64x2_t, b: 
int64x2_t) -> int64x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27451,7 +27451,7 @@ pub fn vzip1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27466,7 +27466,7 @@ pub fn vzip1q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27475,7 +27475,7 @@ pub fn vzip1_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27484,7 +27484,7 @@ pub fn vzip1q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), 
assert_instr(zip1))] @@ -27493,7 +27493,7 @@ pub fn vzip1_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27502,7 +27502,7 @@ pub fn vzip1q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27511,7 +27511,7 @@ pub fn vzip1q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27520,7 +27520,7 @@ pub fn vzip1_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27535,7 +27535,7 @@ pub fn vzip1q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27544,7 +27544,7 @@ pub fn vzip1_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27553,7 +27553,7 @@ pub fn vzip1q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip1q_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip1))] @@ -27562,7 +27562,7 @@ pub fn vzip1q_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -27572,7 +27572,7 @@ pub fn vzip2_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[stable(feature = "stdarch_neon_fp16", since = "1.94.0")] #[cfg(not(target_arch = "arm64ec"))] @@ -27582,7 +27582,7 @@ pub fn vzip2q_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_f32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27591,7 +27591,7 @@ pub fn vzip2_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27600,7 +27600,7 @@ pub fn vzip2q_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27609,7 +27609,7 @@ pub fn vzip2q_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27618,7 +27618,7 @@ pub fn vzip2_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27633,7 +27633,7 @@ pub fn vzip2q_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Zip vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27642,7 +27642,7 @@ pub fn vzip2_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27651,7 +27651,7 @@ pub fn vzip2q_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27660,7 +27660,7 @@ pub fn vzip2_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27669,7 +27669,7 @@ pub fn vzip2q_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27678,7 +27678,7 @@ pub fn vzip2q_s64(a: int64x2_t, b: 
int64x2_t) -> int64x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27687,7 +27687,7 @@ pub fn vzip2_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27702,7 +27702,7 @@ pub fn vzip2q_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27711,7 +27711,7 @@ pub fn vzip2_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27720,7 +27720,7 @@ pub fn vzip2q_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), 
assert_instr(zip2))] @@ -27729,7 +27729,7 @@ pub fn vzip2_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27738,7 +27738,7 @@ pub fn vzip2q_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27747,7 +27747,7 @@ pub fn vzip2q_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27756,7 +27756,7 @@ pub fn vzip2_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27771,7 +27771,7 @@ pub fn vzip2q_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = 
"neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27780,7 +27780,7 @@ pub fn vzip2_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] @@ -27789,7 +27789,7 @@ pub fn vzip2q_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip2q_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(zip2))] diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs index 5f26d61e7c84f..6c6a2476a43fd 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve/generated.rs @@ -15,7 +15,7 @@ use crate::core_arch::arch::aarch64::*; #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -28,7 +28,7 @@ pub fn svabd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -37,7 +37,7 @@ pub fn svabd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -46,7 +46,7 @@ pub fn svabd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -55,7 +55,7 @@ pub fn svabd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -64,7 +64,7 @@ pub fn svabd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -73,7 +73,7 @@ pub fn svabd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -86,7 +86,7 @@ pub fn svabd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -95,7 +95,7 @@ pub fn svabd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -104,7 +104,7 @@ pub fn svabd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -113,7 +113,7 @@ pub fn svabd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -122,7 +122,7 @@ pub fn svabd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Absolute difference"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabd))] @@ -131,7 +131,7 @@ pub fn svabd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -144,7 +144,7 @@ pub fn svabd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -153,7 +153,7 @@ pub fn svabd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -162,7 +162,7 @@ pub fn svabd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -171,7 +171,7 @@ pub fn svabd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { 
} #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -180,7 +180,7 @@ pub fn svabd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -189,7 +189,7 @@ pub fn svabd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -202,7 +202,7 @@ pub fn svabd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -211,7 +211,7 @@ pub fn svabd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -220,7 +220,7 @@ pub fn svabd_s16_x(pg: 
svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -229,7 +229,7 @@ pub fn svabd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -238,7 +238,7 @@ pub fn svabd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -247,7 +247,7 @@ pub fn svabd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -260,7 +260,7 @@ pub fn svabd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(sabd))] @@ -269,7 +269,7 @@ pub fn svabd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -278,7 +278,7 @@ pub fn svabd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -287,7 +287,7 @@ pub fn svabd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -296,7 +296,7 @@ pub fn svabd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -305,7 +305,7 @@ pub fn svabd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -318,7 +318,7 @@ pub fn svabd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -327,7 +327,7 @@ pub fn svabd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -336,7 +336,7 @@ pub fn svabd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -345,7 +345,7 @@ pub fn svabd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -354,7 +354,7 @@ pub fn svabd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabd))] @@ -363,7 +363,7 @@ pub fn svabd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -376,7 +376,7 @@ pub fn svabd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -385,7 +385,7 @@ pub fn svabd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -394,7 +394,7 @@ pub fn svabd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -403,7 +403,7 @@ pub fn svabd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> 
svuint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -412,7 +412,7 @@ pub fn svabd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -421,7 +421,7 @@ pub fn svabd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -434,7 +434,7 @@ pub fn svabd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -443,7 +443,7 @@ pub fn svabd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -452,7 +452,7 @@ pub 
fn svabd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -461,7 +461,7 @@ pub fn svabd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -470,7 +470,7 @@ pub fn svabd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -479,7 +479,7 @@ pub fn svabd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -492,7 +492,7 @@ pub fn svabd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -501,7 +501,7 @@ pub fn svabd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -510,7 +510,7 @@ pub fn svabd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -519,7 +519,7 @@ pub fn svabd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -528,7 +528,7 @@ pub fn svabd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -537,7 +537,7 @@ pub fn svabd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -550,7 +550,7 @@ pub fn svabd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -559,7 +559,7 @@ pub fn svabd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -568,7 +568,7 @@ pub fn svabd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -577,7 +577,7 @@ pub fn svabd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Absolute difference"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -586,7 +586,7 @@ pub fn svabd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Absolute difference"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabd[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabd))] @@ -595,7 +595,7 @@ pub fn svabd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabs))] @@ -608,7 +608,7 @@ pub fn svabs_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfl } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabs))] @@ -617,7 +617,7 @@ pub fn svabs_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabs))] @@ -626,7 +626,7 @@ pub fn svabs_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabs))] @@ -639,7 +639,7 @@ pub fn svabs_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfl } #[doc = "Absolute 
value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabs))] @@ -648,7 +648,7 @@ pub fn svabs_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fabs))] @@ -657,7 +657,7 @@ pub fn svabs_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -670,7 +670,7 @@ pub fn svabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -679,7 +679,7 @@ pub fn svabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -688,7 +688,7 @@ pub fn svabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Absolute value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -701,7 +701,7 @@ pub fn svabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_ } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -710,7 +710,7 @@ pub fn svabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -719,7 +719,7 @@ pub fn svabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -732,7 +732,7 @@ pub fn svabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_ } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -741,7 +741,7 @@ pub fn svabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Absolute value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -750,7 +750,7 @@ pub fn svabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -763,7 +763,7 @@ pub fn svabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_ } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -772,7 +772,7 @@ pub fn svabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabs[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(abs))] @@ -781,7 +781,7 @@ pub fn svabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Absolute compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facge))] @@ -794,7 +794,7 @@ pub fn svacge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Absolute 
compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facge))] @@ -803,7 +803,7 @@ pub fn svacge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Absolute compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facge))] @@ -816,7 +816,7 @@ pub fn svacge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Absolute compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacge[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facge))] @@ -825,7 +825,7 @@ pub fn svacge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facgt))] @@ -838,7 +838,7 @@ pub fn svacgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(facgt))] @@ -847,7 +847,7 @@ pub fn svacgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facgt))] @@ -860,7 +860,7 @@ pub fn svacgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacgt[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facgt))] @@ -869,7 +869,7 @@ pub fn svacgt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Absolute compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facge))] @@ -878,7 +878,7 @@ pub fn svacle_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Absolute compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facge))] @@ -887,7 +887,7 @@ pub fn svacle_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Absolute compare less than or equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facge))] @@ -896,7 +896,7 @@ pub fn svacle_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Absolute compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svacle[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facge))] @@ -905,7 +905,7 @@ pub fn svacle_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facgt))] @@ -914,7 +914,7 @@ pub fn svaclt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facgt))] @@ -923,7 +923,7 @@ pub fn svaclt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facgt))] @@ -932,7 +932,7 @@ pub fn svaclt_f64(pg: 
svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaclt[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(facgt))] @@ -941,7 +941,7 @@ pub fn svaclt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -954,7 +954,7 @@ pub fn svadd_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -963,7 +963,7 @@ pub fn svadd_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -972,7 +972,7 @@ pub fn svadd_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -981,7 +981,7 @@ pub 
fn svadd_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -990,7 +990,7 @@ pub fn svadd_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -999,7 +999,7 @@ pub fn svadd_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -1012,7 +1012,7 @@ pub fn svadd_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -1021,7 +1021,7 @@ pub fn svadd_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -1030,7 +1030,7 @@ 
pub fn svadd_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -1039,7 +1039,7 @@ pub fn svadd_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -1048,7 +1048,7 @@ pub fn svadd_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadd))] @@ -1057,7 +1057,7 @@ pub fn svadd_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1070,7 +1070,7 @@ pub fn svadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1079,7 +1079,7 @@ 
pub fn svadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1088,7 +1088,7 @@ pub fn svadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1097,7 +1097,7 @@ pub fn svadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1106,7 +1106,7 @@ pub fn svadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1115,7 +1115,7 @@ pub fn svadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1128,7 +1128,7 @@ pub fn svadd_s16_m(pg: svbool_t, 
op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1137,7 +1137,7 @@ pub fn svadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1146,7 +1146,7 @@ pub fn svadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1155,7 +1155,7 @@ pub fn svadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1164,7 +1164,7 @@ pub fn svadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1173,7 +1173,7 @@ pub fn svadd_n_s16_z(pg: svbool_t, op1: 
svint16_t, op2: i16) -> svint16_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1186,7 +1186,7 @@ pub fn svadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1195,7 +1195,7 @@ pub fn svadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1204,7 +1204,7 @@ pub fn svadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1213,7 +1213,7 @@ pub fn svadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1222,7 +1222,7 @@ pub fn svadd_s32_z(pg: svbool_t, op1: svint32_t, 
op2: svint32_t) -> svint32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1231,7 +1231,7 @@ pub fn svadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1244,7 +1244,7 @@ pub fn svadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1253,7 +1253,7 @@ pub fn svadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1262,7 +1262,7 @@ pub fn svadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1271,7 +1271,7 @@ pub fn svadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: 
i64) -> svint64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1280,7 +1280,7 @@ pub fn svadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1289,7 +1289,7 @@ pub fn svadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1298,7 +1298,7 @@ pub fn svadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1307,7 +1307,7 @@ pub fn svadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1316,7 +1316,7 @@ pub fn svadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> 
svuint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1325,7 +1325,7 @@ pub fn svadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1334,7 +1334,7 @@ pub fn svadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1343,7 +1343,7 @@ pub fn svadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1352,7 +1352,7 @@ pub fn svadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1361,7 +1361,7 @@ pub fn svadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } 
#[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1370,7 +1370,7 @@ pub fn svadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1379,7 +1379,7 @@ pub fn svadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1388,7 +1388,7 @@ pub fn svadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1397,7 +1397,7 @@ pub fn svadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1406,7 +1406,7 @@ pub fn svadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } 
#[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1415,7 +1415,7 @@ pub fn svadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1424,7 +1424,7 @@ pub fn svadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1433,7 +1433,7 @@ pub fn svadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1442,7 +1442,7 @@ pub fn svadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1451,7 +1451,7 @@ pub fn svadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } 
#[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1460,7 +1460,7 @@ pub fn svadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1469,7 +1469,7 @@ pub fn svadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1478,7 +1478,7 @@ pub fn svadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1487,7 +1487,7 @@ pub fn svadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1496,7 +1496,7 @@ pub fn svadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } 
#[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadd[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(add))] @@ -1505,7 +1505,7 @@ pub fn svadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Add reduction (strictly-ordered)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadda))] @@ -1518,7 +1518,7 @@ pub fn svadda_f32(pg: svbool_t, initial: f32, op: svfloat32_t) -> f32 { } #[doc = "Add reduction (strictly-ordered)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadda[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fadda))] @@ -1531,7 +1531,7 @@ pub fn svadda_f64(pg: svbool_t, initial: f64, op: svfloat64_t) -> f64 { } #[doc = "Add reduction"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(faddv))] @@ -1544,7 +1544,7 @@ pub fn svaddv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { } #[doc = "Add reduction"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(faddv))] @@ -1557,7 +1557,7 @@ pub fn svaddv_f64(pg: svbool_t, op: 
svfloat64_t) -> f64 { } #[doc = "Add reduction"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddv))] @@ -1570,7 +1570,7 @@ pub fn svaddv_s64(pg: svbool_t, op: svint64_t) -> i64 { } #[doc = "Add reduction"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddv))] @@ -1583,7 +1583,7 @@ pub fn svaddv_u64(pg: svbool_t, op: svuint64_t) -> u64 { } #[doc = "Add reduction"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddv))] @@ -1596,7 +1596,7 @@ pub fn svaddv_s8(pg: svbool_t, op: svint8_t) -> i64 { } #[doc = "Add reduction"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddv))] @@ -1609,7 +1609,7 @@ pub fn svaddv_s16(pg: svbool_t, op: svint16_t) -> i64 { } #[doc = "Add reduction"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddv))] @@ -1622,7 +1622,7 @@ pub fn svaddv_s32(pg: svbool_t, op: svint32_t) -> i64 { } #[doc = "Add reduction"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddv))] @@ -1635,7 +1635,7 @@ pub fn svaddv_u8(pg: svbool_t, op: svuint8_t) -> u64 { } #[doc = "Add reduction"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddv))] @@ -1648,7 +1648,7 @@ pub fn svaddv_u16(pg: svbool_t, op: svuint16_t) -> u64 { } #[doc = "Add reduction"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddv[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddv))] @@ -1661,7 +1661,7 @@ pub fn svaddv_u32(pg: svbool_t, op: svuint32_t) -> u64 { } #[doc = "Compute vector addresses for 8-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[s32]offset)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1674,7 +1674,7 @@ pub fn svadrb_u32base_s32offset(bases: svuint32_t, offsets: svint32_t) -> svuint } #[doc = "Compute vector addresses for 16-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[s32]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1687,7 +1687,7 @@ pub fn svadrh_u32base_s32index(bases: svuint32_t, indices: 
svint32_t) -> svuint3 } #[doc = "Compute vector addresses for 32-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[s32]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1700,7 +1700,7 @@ pub fn svadrw_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint3 } #[doc = "Compute vector addresses for 64-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[s32]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1713,7 +1713,7 @@ pub fn svadrd_u32base_s32index(bases: svuint32_t, indices: svint32_t) -> svuint3 } #[doc = "Compute vector addresses for 8-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u32base]_[u32]offset)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1722,7 +1722,7 @@ pub fn svadrb_u32base_u32offset(bases: svuint32_t, offsets: svuint32_t) -> svuin } #[doc = "Compute vector addresses for 16-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u32base]_[u32]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1731,7 +1731,7 @@ pub fn svadrh_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint } #[doc = "Compute vector addresses for 32-bit data"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u32base]_[u32]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1740,7 +1740,7 @@ pub fn svadrw_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint } #[doc = "Compute vector addresses for 64-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u32base]_[u32]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1749,7 +1749,7 @@ pub fn svadrd_u32base_u32index(bases: svuint32_t, indices: svuint32_t) -> svuint } #[doc = "Compute vector addresses for 8-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[s64]offset)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1762,7 +1762,7 @@ pub fn svadrb_u64base_s64offset(bases: svuint64_t, offsets: svint64_t) -> svuint } #[doc = "Compute vector addresses for 16-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[s64]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1775,7 +1775,7 @@ pub fn svadrh_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint6 } #[doc = "Compute vector addresses for 32-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[s64]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1788,7 +1788,7 @@ pub fn svadrw_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint6 } #[doc = "Compute vector addresses for 64-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[s64]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1801,7 +1801,7 @@ pub fn svadrd_u64base_s64index(bases: svuint64_t, indices: svint64_t) -> svuint6 } #[doc = "Compute vector addresses for 8-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrb[_u64base]_[u64]offset)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1810,7 +1810,7 @@ pub fn svadrb_u64base_u64offset(bases: svuint64_t, offsets: svuint64_t) -> svuin } #[doc = "Compute vector addresses for 16-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrh[_u64base]_[u64]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1819,7 +1819,7 @@ pub fn svadrh_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint } #[doc = "Compute vector addresses for 32-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrw[_u64base]_[u64]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1828,7 +1828,7 @@ pub fn svadrw_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> 
svuint } #[doc = "Compute vector addresses for 64-bit data"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadrd[_u64base]_[u64]index)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adr))] @@ -1837,7 +1837,7 @@ pub fn svadrd_u64base_u64index(bases: svuint64_t, indices: svuint64_t) -> svuint } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1850,7 +1850,7 @@ pub fn svand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1863,7 +1863,7 @@ pub fn svand_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1872,7 +1872,7 @@ pub fn svand_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1881,7 +1881,7 @@ pub fn 
svand_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1890,7 +1890,7 @@ pub fn svand_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1899,7 +1899,7 @@ pub fn svand_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1908,7 +1908,7 @@ pub fn svand_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1921,7 +1921,7 @@ pub fn svand_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1930,7 
+1930,7 @@ pub fn svand_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1939,7 +1939,7 @@ pub fn svand_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1948,7 +1948,7 @@ pub fn svand_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1957,7 +1957,7 @@ pub fn svand_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1966,7 +1966,7 @@ pub fn svand_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(and))] @@ -1979,7 +1979,7 @@ pub fn svand_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1988,7 +1988,7 @@ pub fn svand_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -1997,7 +1997,7 @@ pub fn svand_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2006,7 +2006,7 @@ pub fn svand_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2015,7 +2015,7 @@ pub fn svand_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2024,7 +2024,7 @@ pub fn svand_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2037,7 +2037,7 @@ pub fn svand_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2046,7 +2046,7 @@ pub fn svand_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2055,7 +2055,7 @@ pub fn svand_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2064,7 +2064,7 @@ pub fn svand_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2073,7 +2073,7 @@ pub fn svand_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2082,7 +2082,7 @@ pub fn svand_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2091,7 +2091,7 @@ pub fn svand_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2100,7 +2100,7 @@ pub fn svand_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2109,7 +2109,7 @@ pub fn svand_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_x)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2118,7 +2118,7 @@ pub fn svand_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2127,7 +2127,7 @@ pub fn svand_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2136,7 +2136,7 @@ pub fn svand_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2145,7 +2145,7 @@ pub fn svand_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2154,7 +2154,7 @@ pub fn svand_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_x)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2163,7 +2163,7 @@ pub fn svand_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2172,7 +2172,7 @@ pub fn svand_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2181,7 +2181,7 @@ pub fn svand_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2190,7 +2190,7 @@ pub fn svand_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2199,7 +2199,7 @@ pub fn svand_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise AND"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2208,7 +2208,7 @@ pub fn svand_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2217,7 +2217,7 @@ pub fn svand_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2226,7 +2226,7 @@ pub fn svand_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2235,7 +2235,7 @@ pub fn svand_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2244,7 +2244,7 @@ pub fn svand_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } 
#[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2253,7 +2253,7 @@ pub fn svand_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2262,7 +2262,7 @@ pub fn svand_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2271,7 +2271,7 @@ pub fn svand_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2280,7 +2280,7 @@ pub fn svand_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2289,7 +2289,7 @@ pub fn svand_u64_z(pg: svbool_t, op1: 
svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise AND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svand[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(and))] @@ -2298,7 +2298,7 @@ pub fn svand_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise AND reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(andv))] @@ -2311,7 +2311,7 @@ pub fn svandv_s8(pg: svbool_t, op: svint8_t) -> i8 { } #[doc = "Bitwise AND reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(andv))] @@ -2324,7 +2324,7 @@ pub fn svandv_s16(pg: svbool_t, op: svint16_t) -> i16 { } #[doc = "Bitwise AND reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(andv))] @@ -2337,7 +2337,7 @@ pub fn svandv_s32(pg: svbool_t, op: svint32_t) -> i32 { } #[doc = "Bitwise AND reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(andv))] @@ -2350,7 +2350,7 
@@ pub fn svandv_s64(pg: svbool_t, op: svint64_t) -> i64 { } #[doc = "Bitwise AND reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(andv))] @@ -2359,7 +2359,7 @@ pub fn svandv_u8(pg: svbool_t, op: svuint8_t) -> u8 { } #[doc = "Bitwise AND reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(andv))] @@ -2368,7 +2368,7 @@ pub fn svandv_u16(pg: svbool_t, op: svuint16_t) -> u16 { } #[doc = "Bitwise AND reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(andv))] @@ -2377,7 +2377,7 @@ pub fn svandv_u32(pg: svbool_t, op: svuint32_t) -> u32 { } #[doc = "Bitwise AND reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svandv[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(andv))] @@ -2386,7 +2386,7 @@ pub fn svandv_u64(pg: svbool_t, op: svuint64_t) -> u64 { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2399,7 
+2399,7 @@ pub fn svasr_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2408,7 +2408,7 @@ pub fn svasr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2417,7 +2417,7 @@ pub fn svasr_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2426,7 +2426,7 @@ pub fn svasr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2435,7 +2435,7 @@ pub fn svasr_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2444,7 +2444,7 @@ pub fn svasr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2457,7 +2457,7 @@ pub fn svasr_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2466,7 +2466,7 @@ pub fn svasr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2475,7 +2475,7 @@ pub fn svasr_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2484,7 +2484,7 @@ pub fn svasr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s16]_z)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2493,7 +2493,7 @@ pub fn svasr_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2502,7 +2502,7 @@ pub fn svasr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2515,7 +2515,7 @@ pub fn svasr_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2524,7 +2524,7 @@ pub fn svasr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2533,7 +2533,7 @@ pub fn svasr_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2542,7 +2542,7 @@ pub fn svasr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2551,7 +2551,7 @@ pub fn svasr_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2560,7 +2560,7 @@ pub fn svasr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2573,7 +2573,7 @@ pub fn svasr_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2582,7 +2582,7 @@ pub fn svasr_n_s64_m(pg: svbool_t, op1: 
svint64_t, op2: u64) -> svint64_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2591,7 +2591,7 @@ pub fn svasr_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2600,7 +2600,7 @@ pub fn svasr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2609,7 +2609,7 @@ pub fn svasr_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2618,7 +2618,7 @@ pub fn svasr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(asr))] @@ -2634,7 +2634,7 @@ pub fn svasr_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2643,7 +2643,7 @@ pub fn svasr_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2652,7 +2652,7 @@ pub fn svasr_wide_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2661,7 +2661,7 @@ pub fn svasr_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2670,7 +2670,7 @@ pub fn svasr_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s8]_z)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2679,7 +2679,7 @@ pub fn svasr_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2695,7 +2695,7 @@ pub fn svasr_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint1 } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2704,7 +2704,7 @@ pub fn svasr_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2713,7 +2713,7 @@ pub fn svasr_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint1 } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2722,7 +2722,7 @@ pub fn svasr_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { } #[doc = "Arithmetic shift right"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2731,7 +2731,7 @@ pub fn svasr_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint1 } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2740,7 +2740,7 @@ pub fn svasr_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2756,7 +2756,7 @@ pub fn svasr_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint3 } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2765,7 +2765,7 @@ pub fn svasr_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2774,7 
+2774,7 @@ pub fn svasr_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint3 } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2783,7 +2783,7 @@ pub fn svasr_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2792,7 +2792,7 @@ pub fn svasr_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint3 } #[doc = "Arithmetic shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasr_wide[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asr))] @@ -2801,7 +2801,7 @@ pub fn svasr_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2815,7 +2815,7 @@ pub fn svasrd_n_s8_m(pg: svbool_t, op1: svint8_t) -> svint8_t { } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_x)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2824,7 +2824,7 @@ pub fn svasrd_n_s8_x(pg: svbool_t, op1: svint8_t) -> svint8_t { } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2833,7 +2833,7 @@ pub fn svasrd_n_s8_z(pg: svbool_t, op1: svint8_t) -> svint8_t { } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2847,7 +2847,7 @@ pub fn svasrd_n_s16_m(pg: svbool_t, op1: svint16_t) -> svint16_ } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2856,7 +2856,7 @@ pub fn svasrd_n_s16_x(pg: svbool_t, op1: svint16_t) -> svint16_ } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2865,7 +2865,7 @@ pub fn svasrd_n_s16_z(pg: svbool_t, op1: 
svint16_t) -> svint16_ } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2879,7 +2879,7 @@ pub fn svasrd_n_s32_m(pg: svbool_t, op1: svint32_t) -> svint32_ } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2888,7 +2888,7 @@ pub fn svasrd_n_s32_x(pg: svbool_t, op1: svint32_t) -> svint32_ } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2897,7 +2897,7 @@ pub fn svasrd_n_s32_z(pg: svbool_t, op1: svint32_t) -> svint32_ } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2911,7 +2911,7 @@ pub fn svasrd_n_s64_m(pg: svbool_t, op1: svint64_t) -> svint64_ } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_x)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2920,7 +2920,7 @@ pub fn svasrd_n_s64_x(pg: svbool_t, op1: svint64_t) -> svint64_ } #[doc = "Arithmetic shift right for divide by immediate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svasrd[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(asrd, IMM2 = 1))] @@ -2929,7 +2929,7 @@ pub fn svasrd_n_s64_z(pg: svbool_t, op1: svint64_t) -> svint64_ } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -2942,7 +2942,7 @@ pub fn svbic_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -2955,7 +2955,7 @@ pub fn svbic_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -2964,7 +2964,7 @@ pub fn svbic_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -2973,7 +2973,7 @@ pub fn svbic_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -2982,7 +2982,7 @@ pub fn svbic_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -2991,7 +2991,7 @@ pub fn svbic_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3000,7 +3000,7 @@ pub fn svbic_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3013,7 +3013,7 @@ pub fn svbic_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise 
clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3022,7 +3022,7 @@ pub fn svbic_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3031,7 +3031,7 @@ pub fn svbic_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3040,7 +3040,7 @@ pub fn svbic_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3049,7 +3049,7 @@ pub fn svbic_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3058,7 +3058,7 @@ pub fn svbic_n_s16_z(pg: svbool_t, op1: svint16_t, op2: 
i16) -> svint16_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3071,7 +3071,7 @@ pub fn svbic_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3080,7 +3080,7 @@ pub fn svbic_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3089,7 +3089,7 @@ pub fn svbic_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3098,7 +3098,7 @@ pub fn svbic_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3107,7 +3107,7 @@ pub fn 
svbic_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3116,7 +3116,7 @@ pub fn svbic_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3129,7 +3129,7 @@ pub fn svbic_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3138,7 +3138,7 @@ pub fn svbic_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3147,7 +3147,7 @@ pub fn svbic_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(bic))] @@ -3156,7 +3156,7 @@ pub fn svbic_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3165,7 +3165,7 @@ pub fn svbic_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3174,7 +3174,7 @@ pub fn svbic_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3183,7 +3183,7 @@ pub fn svbic_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3192,7 +3192,7 @@ pub fn svbic_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3201,7 +3201,7 @@ pub fn svbic_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3210,7 +3210,7 @@ pub fn svbic_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3219,7 +3219,7 @@ pub fn svbic_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3228,7 +3228,7 @@ pub fn svbic_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3237,7 +3237,7 @@ pub fn svbic_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3246,7 +3246,7 @@ pub fn svbic_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3255,7 +3255,7 @@ pub fn svbic_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3264,7 +3264,7 @@ pub fn svbic_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3273,7 +3273,7 @@ pub fn svbic_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3282,7 +3282,7 @@ pub fn svbic_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_m)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3291,7 +3291,7 @@ pub fn svbic_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3300,7 +3300,7 @@ pub fn svbic_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3309,7 +3309,7 @@ pub fn svbic_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3318,7 +3318,7 @@ pub fn svbic_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3327,7 +3327,7 @@ pub fn svbic_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise clear"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3336,7 +3336,7 @@ pub fn svbic_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3345,7 +3345,7 @@ pub fn svbic_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3354,7 +3354,7 @@ pub fn svbic_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3363,7 +3363,7 @@ pub fn svbic_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3372,7 +3372,7 @@ pub fn svbic_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> 
svuint64_t { } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3381,7 +3381,7 @@ pub fn svbic_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise clear"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbic[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bic))] @@ -3390,7 +3390,7 @@ pub fn svbic_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Break after first true condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(brka))] @@ -3403,7 +3403,7 @@ pub fn svbrka_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Break after first true condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrka[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(brka))] @@ -3416,7 +3416,7 @@ pub fn svbrka_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Break before first true condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(brkb))] @@ 
-3429,7 +3429,7 @@ pub fn svbrkb_b_m(inactive: svbool_t, pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Break before first true condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkb[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(brkb))] @@ -3442,7 +3442,7 @@ pub fn svbrkb_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Propagate break to next partition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkn[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(brkn))] @@ -3455,7 +3455,7 @@ pub fn svbrkn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Break after first true condition, propagating from previous partition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpa[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(brkpa))] @@ -3471,7 +3471,7 @@ pub fn svbrkpa_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Break before first true condition, propagating from previous partition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbrkpb[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(brkpb))] @@ -3487,7 +3487,7 @@ pub fn svbrkpb_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Complex add with rotate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] @@ -3510,7 +3510,7 @@ pub fn svcadd_f32_m( } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] @@ -3523,7 +3523,7 @@ pub fn svcadd_f32_x( } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] @@ -3536,7 +3536,7 @@ pub fn svcadd_f32_z( } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] @@ -3559,7 +3559,7 @@ pub fn svcadd_f64_m( } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] @@ -3572,7 +3572,7 @@ pub fn svcadd_f64_x( } #[doc = "Complex add with rotate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcadd, IMM_ROTATION = 90))] @@ -3585,7 +3585,7 @@ pub fn svcadd_f64_z( } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3598,7 +3598,7 @@ pub fn svclasta_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> s } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3611,7 +3611,7 @@ pub fn svclasta_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> s } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3624,7 +3624,7 @@ pub fn svclasta_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3637,7 
+3637,7 @@ pub fn svclasta_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3650,7 +3650,7 @@ pub fn svclasta_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3663,7 +3663,7 @@ pub fn svclasta_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3672,7 +3672,7 @@ pub fn svclasta_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3681,7 +3681,7 @@ pub fn svclasta_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svu } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3690,7 +3690,7 @@ pub fn svclasta_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svu } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3699,7 +3699,7 @@ pub fn svclasta_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svu } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3715,7 +3715,7 @@ pub fn svclasta_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 { } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3731,7 +3731,7 @@ pub fn svclasta_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 { } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(clasta))] @@ -3747,7 +3747,7 @@ pub fn svclasta_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 { } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3763,7 +3763,7 @@ pub fn svclasta_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 { } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3779,7 +3779,7 @@ pub fn svclasta_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 { } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3795,7 +3795,7 @@ pub fn svclasta_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 { } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3804,7 +3804,7 @@ pub fn svclasta_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 { } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3813,7 +3813,7 @@ pub fn svclasta_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 { } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3822,7 +3822,7 @@ pub fn svclasta_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 { } #[doc = "Conditionally extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclasta[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clasta))] @@ -3831,7 +3831,7 @@ pub fn svclasta_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3844,7 +3844,7 @@ pub fn svclastb_f32(pg: svbool_t, fallback: svfloat32_t, data: svfloat32_t) -> s } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(clastb))] @@ -3857,7 +3857,7 @@ pub fn svclastb_f64(pg: svbool_t, fallback: svfloat64_t, data: svfloat64_t) -> s } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3870,7 +3870,7 @@ pub fn svclastb_s8(pg: svbool_t, fallback: svint8_t, data: svint8_t) -> svint8_t } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3883,7 +3883,7 @@ pub fn svclastb_s16(pg: svbool_t, fallback: svint16_t, data: svint16_t) -> svint } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3896,7 +3896,7 @@ pub fn svclastb_s32(pg: svbool_t, fallback: svint32_t, data: svint32_t) -> svint } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3909,7 +3909,7 @@ pub fn svclastb_s64(pg: svbool_t, fallback: svint64_t, data: svint64_t) -> svint } #[doc = "Conditionally extract last element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3918,7 +3918,7 @@ pub fn svclastb_u8(pg: svbool_t, fallback: svuint8_t, data: svuint8_t) -> svuint } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3927,7 +3927,7 @@ pub fn svclastb_u16(pg: svbool_t, fallback: svuint16_t, data: svuint16_t) -> svu } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3936,7 +3936,7 @@ pub fn svclastb_u32(pg: svbool_t, fallback: svuint32_t, data: svuint32_t) -> svu } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3945,7 +3945,7 @@ pub fn svclastb_u64(pg: svbool_t, fallback: svuint64_t, data: svuint64_t) -> svu } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(clastb))] @@ -3961,7 +3961,7 @@ pub fn svclastb_n_f32(pg: svbool_t, fallback: f32, data: svfloat32_t) -> f32 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3977,7 +3977,7 @@ pub fn svclastb_n_f64(pg: svbool_t, fallback: f64, data: svfloat64_t) -> f64 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -3993,7 +3993,7 @@ pub fn svclastb_n_s8(pg: svbool_t, fallback: i8, data: svint8_t) -> i8 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -4009,7 +4009,7 @@ pub fn svclastb_n_s16(pg: svbool_t, fallback: i16, data: svint16_t) -> i16 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -4025,7 +4025,7 @@ pub fn svclastb_n_s32(pg: svbool_t, fallback: i32, data: svint32_t) -> i32 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -4041,7 +4041,7 @@ pub fn svclastb_n_s64(pg: svbool_t, fallback: i64, data: svint64_t) -> i64 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -4050,7 +4050,7 @@ pub fn svclastb_n_u8(pg: svbool_t, fallback: u8, data: svuint8_t) -> u8 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -4059,7 +4059,7 @@ pub fn svclastb_n_u16(pg: svbool_t, fallback: u16, data: svuint16_t) -> u16 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] @@ -4068,7 +4068,7 @@ pub fn svclastb_n_u32(pg: svbool_t, fallback: u32, data: svuint32_t) -> u32 { } #[doc = "Conditionally extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclastb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clastb))] 
@@ -4077,7 +4077,7 @@ pub fn svclastb_n_u64(pg: svbool_t, fallback: u64, data: svuint64_t) -> u64 { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4090,7 +4090,7 @@ pub fn svcls_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4099,7 +4099,7 @@ pub fn svcls_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4108,7 +4108,7 @@ pub fn svcls_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4121,7 +4121,7 @@ pub fn svcls_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint1 } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4130,7 +4130,7 @@ pub fn svcls_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4139,7 +4139,7 @@ pub fn svcls_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4152,7 +4152,7 @@ pub fn svcls_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint3 } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4161,7 +4161,7 @@ pub fn svcls_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4170,7 +4170,7 @@ pub fn svcls_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4183,7 +4183,7 @@ pub fn svcls_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint6 } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4192,7 +4192,7 @@ pub fn svcls_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcls[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cls))] @@ -4201,7 +4201,7 @@ pub fn svcls_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4214,7 +4214,7 @@ pub fn svclz_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4223,7 +4223,7 @@ pub fn svclz_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s8]_z)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4232,7 +4232,7 @@ pub fn svclz_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4245,7 +4245,7 @@ pub fn svclz_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint1 } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4254,7 +4254,7 @@ pub fn svclz_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4263,7 +4263,7 @@ pub fn svclz_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4276,7 +4276,7 @@ pub fn svclz_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint3 } #[doc = "Count leading zero bits"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4285,7 +4285,7 @@ pub fn svclz_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4294,7 +4294,7 @@ pub fn svclz_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4307,7 +4307,7 @@ pub fn svclz_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint6 } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4316,7 +4316,7 @@ pub fn svclz_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4325,7 +4325,7 @@ pub fn svclz_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { } #[doc = "Count 
leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4334,7 +4334,7 @@ pub fn svclz_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4343,7 +4343,7 @@ pub fn svclz_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4352,7 +4352,7 @@ pub fn svclz_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4361,7 +4361,7 @@ pub fn svclz_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4370,7 +4370,7 @@ pub fn svclz_u16_x(pg: svbool_t, op: 
svuint16_t) -> svuint16_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4379,7 +4379,7 @@ pub fn svclz_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4388,7 +4388,7 @@ pub fn svclz_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4397,7 +4397,7 @@ pub fn svclz_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4406,7 +4406,7 @@ pub fn svclz_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4415,7 +4415,7 @@ 
pub fn svclz_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4424,7 +4424,7 @@ pub fn svclz_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svclz[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(clz))] @@ -4433,7 +4433,7 @@ pub fn svclz_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] @@ -4460,7 +4460,7 @@ pub fn svcmla_f32_m( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] @@ -4474,7 +4474,7 @@ pub fn svcmla_f32_x( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmla, 
IMM_ROTATION = 90))] @@ -4488,7 +4488,7 @@ pub fn svcmla_f32_z( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] @@ -4515,7 +4515,7 @@ pub fn svcmla_f64_m( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] @@ -4529,7 +4529,7 @@ pub fn svcmla_f64_x( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmla, IMM_ROTATION = 90))] @@ -4543,7 +4543,7 @@ pub fn svcmla_f64_z( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmla, IMM_INDEX = 0, IMM_ROTATION = 90))] @@ -4573,7 +4573,7 @@ pub fn svcmla_lane_f32( } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmeq))] @@ -4586,7 +4586,7 @@ pub fn 
svcmpeq_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmeq))] @@ -4595,7 +4595,7 @@ pub fn svcmpeq_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmeq))] @@ -4608,7 +4608,7 @@ pub fn svcmpeq_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmeq))] @@ -4617,7 +4617,7 @@ pub fn svcmpeq_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4630,7 +4630,7 @@ pub fn svcmpeq_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(cmpeq))] @@ -4639,7 +4639,7 @@ pub fn svcmpeq_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4652,7 +4652,7 @@ pub fn svcmpeq_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4661,7 +4661,7 @@ pub fn svcmpeq_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4674,7 +4674,7 @@ pub fn svcmpeq_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4683,7 +4683,7 @@ pub fn svcmpeq_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4696,7 +4696,7 @@ pub fn svcmpeq_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4705,7 +4705,7 @@ pub fn svcmpeq_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4714,7 +4714,7 @@ pub fn svcmpeq_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4723,7 +4723,7 @@ pub fn svcmpeq_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4732,7 +4732,7 @@ pub fn svcmpeq_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u16])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4741,7 +4741,7 @@ pub fn svcmpeq_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4750,7 +4750,7 @@ pub fn svcmpeq_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4759,7 +4759,7 @@ pub fn svcmpeq_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4768,7 +4768,7 @@ pub fn svcmpeq_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4777,7 +4777,7 @@ pub fn svcmpeq_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4793,7 +4793,7 @@ pub fn svcmpeq_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4802,7 +4802,7 @@ pub fn svcmpeq_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4818,7 +4818,7 @@ pub fn svcmpeq_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_ } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4827,7 +4827,7 @@ pub fn svcmpeq_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4843,7 +4843,7 @@ pub fn svcmpeq_wide_s32(pg: 
svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_ } #[doc = "Compare equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpeq_wide[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpeq))] @@ -4852,7 +4852,7 @@ pub fn svcmpeq_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmge))] @@ -4865,7 +4865,7 @@ pub fn svcmpge_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmge))] @@ -4874,7 +4874,7 @@ pub fn svcmpge_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmge))] @@ -4887,7 +4887,7 @@ pub fn svcmpge_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmge))] @@ -4896,7 +4896,7 @@ pub fn svcmpge_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -4909,7 +4909,7 @@ pub fn svcmpge_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -4918,7 +4918,7 @@ pub fn svcmpge_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -4931,7 +4931,7 @@ pub fn svcmpge_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -4940,7 +4940,7 @@ pub fn svcmpge_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -4953,7 +4953,7 @@ pub fn svcmpge_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -4962,7 +4962,7 @@ pub fn svcmpge_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -4975,7 +4975,7 @@ pub fn svcmpge_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -4984,7 +4984,7 @@ pub fn svcmpge_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -4997,7 +4997,7 @@ 
pub fn svcmpge_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5006,7 +5006,7 @@ pub fn svcmpge_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5019,7 +5019,7 @@ pub fn svcmpge_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5028,7 +5028,7 @@ pub fn svcmpge_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5041,7 +5041,7 @@ pub fn svcmpge_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u32])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5050,7 +5050,7 @@ pub fn svcmpge_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5063,7 +5063,7 @@ pub fn svcmpge_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5072,7 +5072,7 @@ pub fn svcmpge_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5088,7 +5088,7 @@ pub fn svcmpge_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5097,7 +5097,7 @@ pub fn svcmpge_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { } #[doc = "Compare greater than or 
equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5113,7 +5113,7 @@ pub fn svcmpge_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_ } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5122,7 +5122,7 @@ pub fn svcmpge_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5138,7 +5138,7 @@ pub fn svcmpge_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_ } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5147,7 +5147,7 @@ pub fn svcmpge_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5163,7 +5163,7 @@ pub fn svcmpge_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_ } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5172,7 +5172,7 @@ pub fn svcmpge_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5188,7 +5188,7 @@ pub fn svcmpge_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svboo } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5197,7 +5197,7 @@ pub fn svcmpge_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5213,7 +5213,7 @@ pub fn svcmpge_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svboo } #[doc = "Compare greater than or equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpge_wide[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5222,7 +5222,7 @@ pub fn svcmpge_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmgt))] @@ -5235,7 +5235,7 @@ pub fn svcmpgt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmgt))] @@ -5244,7 +5244,7 @@ pub fn svcmpgt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmgt))] @@ -5257,7 +5257,7 @@ pub fn svcmpgt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmgt))] @@ -5266,7 +5266,7 @@ pub fn svcmpgt_n_f64(pg: 
svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5279,7 +5279,7 @@ pub fn svcmpgt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5288,7 +5288,7 @@ pub fn svcmpgt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5301,7 +5301,7 @@ pub fn svcmpgt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5310,7 +5310,7 @@ pub fn svcmpgt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(cmpgt))] @@ -5323,7 +5323,7 @@ pub fn svcmpgt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5332,7 +5332,7 @@ pub fn svcmpgt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5345,7 +5345,7 @@ pub fn svcmpgt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5354,7 +5354,7 @@ pub fn svcmpgt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5367,7 +5367,7 @@ pub fn svcmpgt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u8])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5376,7 +5376,7 @@ pub fn svcmpgt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5389,7 +5389,7 @@ pub fn svcmpgt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5398,7 +5398,7 @@ pub fn svcmpgt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5411,7 +5411,7 @@ pub fn svcmpgt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5420,7 +5420,7 @@ pub fn svcmpgt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5433,7 +5433,7 @@ pub fn svcmpgt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5442,7 +5442,7 @@ pub fn svcmpgt_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5458,7 +5458,7 @@ pub fn svcmpgt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5467,7 +5467,7 @@ pub fn svcmpgt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5483,7 +5483,7 @@ pub fn svcmpgt_wide_s16(pg: 
svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_ } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5492,7 +5492,7 @@ pub fn svcmpgt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5508,7 +5508,7 @@ pub fn svcmpgt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_ } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5517,7 +5517,7 @@ pub fn svcmpgt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5533,7 +5533,7 @@ pub fn svcmpgt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_ } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5542,7 +5542,7 @@ pub fn svcmpgt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5558,7 +5558,7 @@ pub fn svcmpgt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svboo } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5567,7 +5567,7 @@ pub fn svcmpgt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5583,7 +5583,7 @@ pub fn svcmpgt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svboo } #[doc = "Compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpgt_wide[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -5592,7 +5592,7 @@ pub fn svcmpgt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmge))] @@ -5601,7 +5601,7 @@ pub fn svcmple_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmge))] @@ -5610,7 +5610,7 @@ pub fn svcmple_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmge))] @@ -5619,7 +5619,7 @@ pub fn svcmple_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmge))] @@ -5628,7 +5628,7 @@ pub fn svcmple_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5637,7 +5637,7 @@ pub 
fn svcmple_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5646,7 +5646,7 @@ pub fn svcmple_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5655,7 +5655,7 @@ pub fn svcmple_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5664,7 +5664,7 @@ pub fn svcmple_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5673,7 +5673,7 @@ pub fn svcmple_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5682,7 +5682,7 @@ pub fn svcmple_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5691,7 +5691,7 @@ pub fn svcmple_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpge))] @@ -5700,7 +5700,7 @@ pub fn svcmple_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5709,7 +5709,7 @@ pub fn svcmple_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5718,7 +5718,7 @@ pub fn svcmple_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5727,7 +5727,7 @@ pub fn svcmple_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5736,7 +5736,7 @@ pub fn svcmple_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5745,7 +5745,7 @@ pub fn svcmple_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5754,7 +5754,7 @@ pub fn svcmple_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5763,7 +5763,7 @@ pub 
fn svcmple_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphs))] @@ -5772,7 +5772,7 @@ pub fn svcmple_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmple))] @@ -5788,7 +5788,7 @@ pub fn svcmple_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmple))] @@ -5797,7 +5797,7 @@ pub fn svcmple_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmple))] @@ -5813,7 +5813,7 @@ pub fn svcmple_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_ } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s16])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmple))] @@ -5822,7 +5822,7 @@ pub fn svcmple_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmple))] @@ -5838,7 +5838,7 @@ pub fn svcmple_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_ } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmple))] @@ -5847,7 +5847,7 @@ pub fn svcmple_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpls))] @@ -5863,7 +5863,7 @@ pub fn svcmple_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_ } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpls))] @@ -5872,7 +5872,7 @@ pub fn svcmple_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { } #[doc = "Compare less than 
or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpls))] @@ -5888,7 +5888,7 @@ pub fn svcmple_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svboo } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpls))] @@ -5897,7 +5897,7 @@ pub fn svcmple_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpls))] @@ -5913,7 +5913,7 @@ pub fn svcmple_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svboo } #[doc = "Compare less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmple_wide[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpls))] @@ -5922,7 +5922,7 @@ pub fn svcmple_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(fcmgt))] @@ -5931,7 +5931,7 @@ pub fn svcmplt_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmgt))] @@ -5940,7 +5940,7 @@ pub fn svcmplt_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmgt))] @@ -5949,7 +5949,7 @@ pub fn svcmplt_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmgt))] @@ -5958,7 +5958,7 @@ pub fn svcmplt_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5967,7 +5967,7 @@ pub fn svcmplt_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5976,7 +5976,7 @@ pub fn svcmplt_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5985,7 +5985,7 @@ pub fn svcmplt_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -5994,7 +5994,7 @@ pub fn svcmplt_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -6003,7 +6003,7 @@ pub fn svcmplt_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -6012,7 +6012,7 @@ pub fn svcmplt_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_s64])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -6021,7 +6021,7 @@ pub fn svcmplt_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpgt))] @@ -6030,7 +6030,7 @@ pub fn svcmplt_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -6039,7 +6039,7 @@ pub fn svcmplt_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -6048,7 +6048,7 @@ pub fn svcmplt_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -6057,7 +6057,7 @@ pub fn svcmplt_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -6066,7 +6066,7 @@ pub fn svcmplt_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -6075,7 +6075,7 @@ pub fn svcmplt_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -6084,7 +6084,7 @@ pub fn svcmplt_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -6093,7 +6093,7 @@ pub fn svcmplt_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmphi))] @@ -6102,7 +6102,7 @@ pub fn svcmplt_n_u64(pg: svbool_t, op1: svuint64_t, 
op2: u64) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplt))] @@ -6118,7 +6118,7 @@ pub fn svcmplt_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplt))] @@ -6127,7 +6127,7 @@ pub fn svcmplt_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplt))] @@ -6143,7 +6143,7 @@ pub fn svcmplt_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_ } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplt))] @@ -6152,7 +6152,7 @@ pub fn svcmplt_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(cmplt))] @@ -6168,7 +6168,7 @@ pub fn svcmplt_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_ } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplt))] @@ -6177,7 +6177,7 @@ pub fn svcmplt_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplo))] @@ -6193,7 +6193,7 @@ pub fn svcmplt_wide_u8(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svbool_ } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplo))] @@ -6202,7 +6202,7 @@ pub fn svcmplt_wide_n_u8(pg: svbool_t, op1: svuint8_t, op2: u64) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplo))] @@ -6218,7 +6218,7 @@ pub fn svcmplt_wide_u16(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svboo } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u16])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplo))] @@ -6227,7 +6227,7 @@ pub fn svcmplt_wide_n_u16(pg: svbool_t, op1: svuint16_t, op2: u64) -> svbool_t { } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplo))] @@ -6243,7 +6243,7 @@ pub fn svcmplt_wide_u32(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svboo } #[doc = "Compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmplt_wide[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmplo))] @@ -6252,7 +6252,7 @@ pub fn svcmplt_wide_n_u32(pg: svbool_t, op1: svuint32_t, op2: u64) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmne))] @@ -6265,7 +6265,7 @@ pub fn svcmpne_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmne))] @@ -6274,7 +6274,7 @@ pub fn svcmpne_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmne))] @@ -6287,7 +6287,7 @@ pub fn svcmpne_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmne))] @@ -6296,7 +6296,7 @@ pub fn svcmpne_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6309,7 +6309,7 @@ pub fn svcmpne_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6318,7 +6318,7 @@ pub fn svcmpne_n_s8(pg: svbool_t, op1: svint8_t, op2: i8) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6331,7 +6331,7 @@ pub fn svcmpne_s16(pg: svbool_t, op1: svint16_t, 
op2: svint16_t) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6340,7 +6340,7 @@ pub fn svcmpne_n_s16(pg: svbool_t, op1: svint16_t, op2: i16) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6353,7 +6353,7 @@ pub fn svcmpne_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6362,7 +6362,7 @@ pub fn svcmpne_n_s32(pg: svbool_t, op1: svint32_t, op2: i32) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6375,7 +6375,7 @@ pub fn svcmpne_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(cmpne))] @@ -6384,7 +6384,7 @@ pub fn svcmpne_n_s64(pg: svbool_t, op1: svint64_t, op2: i64) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6393,7 +6393,7 @@ pub fn svcmpne_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6402,7 +6402,7 @@ pub fn svcmpne_n_u8(pg: svbool_t, op1: svuint8_t, op2: u8) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6411,7 +6411,7 @@ pub fn svcmpne_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6420,7 +6420,7 @@ pub fn svcmpne_n_u16(pg: svbool_t, op1: svuint16_t, op2: u16) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6429,7 +6429,7 @@ pub fn svcmpne_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6438,7 +6438,7 @@ pub fn svcmpne_n_u32(pg: svbool_t, op1: svuint32_t, op2: u32) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6447,7 +6447,7 @@ pub fn svcmpne_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6456,7 +6456,7 @@ pub fn svcmpne_n_u64(pg: svbool_t, op1: svuint64_t, op2: u64) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6472,7 +6472,7 @@ pub fn svcmpne_wide_s8(pg: svbool_t, op1: svint8_t, op2: svint64_t) -> svbool_t } #[doc = "Compare not equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6481,7 +6481,7 @@ pub fn svcmpne_wide_n_s8(pg: svbool_t, op1: svint8_t, op2: i64) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6497,7 +6497,7 @@ pub fn svcmpne_wide_s16(pg: svbool_t, op1: svint16_t, op2: svint64_t) -> svbool_ } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6506,7 +6506,7 @@ pub fn svcmpne_wide_n_s16(pg: svbool_t, op1: svint16_t, op2: i64) -> svbool_t { } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6522,7 +6522,7 @@ pub fn svcmpne_wide_s32(pg: svbool_t, op1: svint32_t, op2: svint64_t) -> svbool_ } #[doc = "Compare not equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpne_wide[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmpne))] @@ -6531,7 +6531,7 @@ pub fn 
svcmpne_wide_n_s32(pg: svbool_t, op1: svint32_t, op2: i64) -> svbool_t { } #[doc = "Compare unordered with"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmuo))] @@ -6544,7 +6544,7 @@ pub fn svcmpuo_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svbool_t } #[doc = "Compare unordered with"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmuo))] @@ -6553,7 +6553,7 @@ pub fn svcmpuo_n_f32(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svbool_t { } #[doc = "Compare unordered with"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmuo))] @@ -6566,7 +6566,7 @@ pub fn svcmpuo_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svbool_t } #[doc = "Compare unordered with"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmpuo[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcmuo))] @@ -6575,7 +6575,7 @@ pub fn svcmpuo_n_f64(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svbool_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6588,7 +6588,7 @@ pub fn svcnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6597,7 +6597,7 @@ pub fn svcnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6606,7 +6606,7 @@ pub fn svcnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6619,7 +6619,7 @@ pub fn svcnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16 } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6628,7 +6628,7 @@ pub fn svcnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6637,7 +6637,7 @@ pub fn svcnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6650,7 +6650,7 @@ pub fn svcnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32 } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6659,7 +6659,7 @@ pub fn svcnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6668,7 +6668,7 @@ pub fn svcnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6681,7 +6681,7 @@ pub fn svcnot_s64_m(inactive: 
svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6690,7 +6690,7 @@ pub fn svcnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6699,7 +6699,7 @@ pub fn svcnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6708,7 +6708,7 @@ pub fn svcnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_ } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6717,7 +6717,7 @@ pub fn svcnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6726,7 +6726,7 @@ pub fn svcnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6735,7 +6735,7 @@ pub fn svcnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuin } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6744,7 +6744,7 @@ pub fn svcnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6753,7 +6753,7 @@ pub fn svcnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6762,7 +6762,7 @@ pub fn svcnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuin } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6771,7 +6771,7 @@ pub fn svcnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6780,7 +6780,7 @@ pub fn svcnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6789,7 +6789,7 @@ pub fn svcnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuin } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6798,7 +6798,7 @@ pub fn svcnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Logically invert boolean condition"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnot[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnot))] @@ -6807,7 +6807,7 @@ pub fn svcnot_u64_z(pg: 
svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6820,7 +6820,7 @@ pub fn svcnt_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> svuin } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6829,7 +6829,7 @@ pub fn svcnt_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6838,7 +6838,7 @@ pub fn svcnt_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6851,7 +6851,7 @@ pub fn svcnt_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> svuin } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6860,7 
+6860,7 @@ pub fn svcnt_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6869,7 +6869,7 @@ pub fn svcnt_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6882,7 +6882,7 @@ pub fn svcnt_s8_m(inactive: svuint8_t, pg: svbool_t, op: svint8_t) -> svuint8_t } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6891,7 +6891,7 @@ pub fn svcnt_s8_x(pg: svbool_t, op: svint8_t) -> svuint8_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6900,7 +6900,7 @@ pub fn svcnt_s8_z(pg: svbool_t, op: svint8_t) -> svuint8_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ 
-6913,7 +6913,7 @@ pub fn svcnt_s16_m(inactive: svuint16_t, pg: svbool_t, op: svint16_t) -> svuint1 } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6922,7 +6922,7 @@ pub fn svcnt_s16_x(pg: svbool_t, op: svint16_t) -> svuint16_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6931,7 +6931,7 @@ pub fn svcnt_s16_z(pg: svbool_t, op: svint16_t) -> svuint16_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6944,7 +6944,7 @@ pub fn svcnt_s32_m(inactive: svuint32_t, pg: svbool_t, op: svint32_t) -> svuint3 } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6953,7 +6953,7 @@ pub fn svcnt_s32_x(pg: svbool_t, op: svint32_t) -> svuint32_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(cnt))] @@ -6962,7 +6962,7 @@ pub fn svcnt_s32_z(pg: svbool_t, op: svint32_t) -> svuint32_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6975,7 +6975,7 @@ pub fn svcnt_s64_m(inactive: svuint64_t, pg: svbool_t, op: svint64_t) -> svuint6 } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6984,7 +6984,7 @@ pub fn svcnt_s64_x(pg: svbool_t, op: svint64_t) -> svuint64_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -6993,7 +6993,7 @@ pub fn svcnt_s64_z(pg: svbool_t, op: svint64_t) -> svuint64_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7002,7 +7002,7 @@ pub fn svcnt_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7011,7 +7011,7 @@ pub fn svcnt_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7020,7 +7020,7 @@ pub fn svcnt_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7029,7 +7029,7 @@ pub fn svcnt_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7038,7 +7038,7 @@ pub fn svcnt_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7047,7 +7047,7 @@ pub fn svcnt_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7056,7 +7056,7 @@ pub fn svcnt_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7065,7 +7065,7 @@ pub fn svcnt_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7074,7 +7074,7 @@ pub fn svcnt_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7083,7 +7083,7 @@ pub fn svcnt_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7092,7 +7092,7 @@ pub fn svcnt_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Count nonzero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnt[_u64]_z)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnt))] @@ -7101,7 +7101,7 @@ pub fn svcnt_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Count the number of 8-bit elements in a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rdvl))] @@ -7110,7 +7110,7 @@ pub fn svcntb() -> u64 { } #[doc = "Count the number of 16-bit elements in a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnth))] @@ -7119,7 +7119,7 @@ pub fn svcnth() -> u64 { } #[doc = "Count the number of 32-bit elements in a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntw))] @@ -7128,7 +7128,7 @@ pub fn svcntw() -> u64 { } #[doc = "Count the number of 64-bit elements in a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntd))] @@ -7137,7 +7137,7 @@ pub fn svcntd() -> u64 { } #[doc = "Count the number of 8-bit elements in a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntb_pat)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (rdvl , PATTERN = { svpattern :: SV_ALL }))] @@ -7151,7 +7151,7 @@ pub fn svcntb_pat() -> u64 { } #[doc = "Count the number of 16-bit elements in a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcnth_pat)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (cnth , PATTERN = { svpattern :: SV_ALL }))] @@ -7164,7 +7164,7 @@ pub fn svcnth_pat() -> u64 { } #[doc = "Count the number of 32-bit elements in a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntw_pat)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (cntw , PATTERN = { svpattern :: SV_ALL }))] @@ -7177,7 +7177,7 @@ pub fn svcntw_pat() -> u64 { } #[doc = "Count the number of 64-bit elements in a vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntd_pat)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (cntd , PATTERN = { svpattern :: SV_ALL }))] @@ -7190,7 +7190,7 @@ pub fn svcntd_pat() -> u64 { } #[doc = "Count set predicate bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntp))] @@ -7203,7 +7203,7 @@ pub fn svcntp_b8(pg: svbool_t, op: svbool_t) -> u64 { } #[doc = "Count set predicate bits"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntp))] @@ -7216,7 +7216,7 @@ pub fn svcntp_b16(pg: svbool_t, op: svbool_t) -> u64 { } #[doc = "Count set predicate bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntp))] @@ -7229,7 +7229,7 @@ pub fn svcntp_b32(pg: svbool_t, op: svbool_t) -> u64 { } #[doc = "Count set predicate bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcntp_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntp))] @@ -7242,7 +7242,7 @@ pub fn svcntp_b64(pg: svbool_t, op: svbool_t) -> u64 { } #[doc = "Shuffle active elements of vector to the right and fill with zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(compact))] @@ -7258,7 +7258,7 @@ pub fn svcompact_f32(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Shuffle active elements of vector to the right and fill with zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(compact))] @@ -7274,7 +7274,7 @@ pub fn svcompact_f64(pg: 
svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Shuffle active elements of vector to the right and fill with zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(compact))] @@ -7290,7 +7290,7 @@ pub fn svcompact_s32(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Shuffle active elements of vector to the right and fill with zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(compact))] @@ -7306,7 +7306,7 @@ pub fn svcompact_s64(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Shuffle active elements of vector to the right and fill with zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(compact))] @@ -7315,7 +7315,7 @@ pub fn svcompact_u32(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Shuffle active elements of vector to the right and fill with zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcompact[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(compact))] @@ -7324,7 +7324,7 @@ pub fn svcompact_u64(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Create a tuple of two vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t { @@ -7332,7 +7332,7 @@ pub fn svcreate2_f32(x0: svfloat32_t, x1: svfloat32_t) -> svfloat32x2_t { } #[doc = "Create a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t { @@ -7340,7 +7340,7 @@ pub fn svcreate2_f64(x0: svfloat64_t, x1: svfloat64_t) -> svfloat64x2_t { } #[doc = "Create a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t { @@ -7348,7 +7348,7 @@ pub fn svcreate2_s8(x0: svint8_t, x1: svint8_t) -> svint8x2_t { } #[doc = "Create a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t { @@ -7356,7 +7356,7 @@ pub fn svcreate2_s16(x0: svint16_t, x1: svint16_t) -> svint16x2_t { } #[doc = "Create a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t { @@ -7364,7 +7364,7 @@ pub fn svcreate2_s32(x0: svint32_t, x1: svint32_t) -> svint32x2_t { } #[doc = "Create a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t { @@ -7372,7 +7372,7 @@ pub fn svcreate2_s64(x0: svint64_t, x1: svint64_t) -> svint64x2_t { } #[doc = "Create a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_u8(x0: svuint8_t, x1: svuint8_t) -> svuint8x2_t { @@ -7380,7 +7380,7 @@ pub fn svcreate2_u8(x0: svuint8_t, x1: svuint8_t) -> svuint8x2_t { } #[doc = "Create a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_u16(x0: svuint16_t, x1: svuint16_t) -> svuint16x2_t { @@ -7388,7 +7388,7 @@ pub fn svcreate2_u16(x0: svuint16_t, x1: svuint16_t) -> svuint16x2_t { } #[doc = "Create a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_u32(x0: svuint32_t, x1: svuint32_t) -> svuint32x2_t { @@ -7396,7 +7396,7 @@ pub fn svcreate2_u32(x0: svuint32_t, x1: svuint32_t) -> svuint32x2_t { } 
#[doc = "Create a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate2[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate2_u64(x0: svuint64_t, x1: svuint64_t) -> svuint64x2_t { @@ -7404,7 +7404,7 @@ pub fn svcreate2_u64(x0: svuint64_t, x1: svuint64_t) -> svuint64x2_t { } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svfloat32x3_t { @@ -7412,7 +7412,7 @@ pub fn svcreate3_f32(x0: svfloat32_t, x1: svfloat32_t, x2: svfloat32_t) -> svflo } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svfloat64x3_t { @@ -7420,7 +7420,7 @@ pub fn svcreate3_f64(x0: svfloat64_t, x1: svfloat64_t, x2: svfloat64_t) -> svflo } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t { @@ -7428,7 +7428,7 @@ pub fn svcreate3_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t) -> svint8x3_t { } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t { @@ -7436,7 +7436,7 @@ pub fn svcreate3_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t) -> svint16x3_t } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t { @@ -7444,7 +7444,7 @@ pub fn svcreate3_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t) -> svint32x3_t } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t { @@ -7452,7 +7452,7 @@ pub fn svcreate3_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t) -> svint64x3_t } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t) -> svuint8x3_t { @@ -7460,7 +7460,7 @@ pub fn svcreate3_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t) -> svuint8x3_t } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_u16(x0: svuint16_t, x1: svuint16_t, x2: svuint16_t) -> svuint16x3_t { @@ -7468,7 +7468,7 @@ pub fn svcreate3_u16(x0: svuint16_t, x1: svuint16_t, x2: svuint16_t) -> svuint16 } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_u32(x0: svuint32_t, x1: svuint32_t, x2: svuint32_t) -> svuint32x3_t { @@ -7476,7 +7476,7 @@ pub fn svcreate3_u32(x0: svuint32_t, x1: svuint32_t, x2: svuint32_t) -> svuint32 } #[doc = "Create a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate3[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate3_u64(x0: svuint64_t, x1: svuint64_t, x2: svuint64_t) -> svuint64x3_t { @@ -7484,7 +7484,7 @@ pub fn svcreate3_u64(x0: svuint64_t, x1: svuint64_t, x2: svuint64_t) -> svuint64 } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_f32( @@ -7497,7 +7497,7 @@ pub fn svcreate4_f32( } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_f64( @@ -7510,7 +7510,7 @@ pub fn svcreate4_f64( } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> svint8x4_t { @@ -7518,7 +7518,7 @@ pub fn svcreate4_s8(x0: svint8_t, x1: svint8_t, x2: svint8_t, x3: svint8_t) -> s } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t, x3: svint16_t) -> svint16x4_t { @@ -7526,7 +7526,7 @@ pub fn svcreate4_s16(x0: svint16_t, x1: svint16_t, x2: svint16_t, x3: svint16_t) } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t, x3: svint32_t) -> svint32x4_t { @@ -7534,7 +7534,7 @@ pub fn svcreate4_s32(x0: svint32_t, x1: svint32_t, x2: svint32_t, x3: svint32_t) } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t, x3: svint64_t) -> svint64x4_t { @@ -7542,7 +7542,7 @@ pub fn 
svcreate4_s64(x0: svint64_t, x1: svint64_t, x2: svint64_t, x3: svint64_t) } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t, x3: svuint8_t) -> svuint8x4_t { @@ -7550,7 +7550,7 @@ pub fn svcreate4_u8(x0: svuint8_t, x1: svuint8_t, x2: svuint8_t, x3: svuint8_t) } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_u16( @@ -7563,7 +7563,7 @@ pub fn svcreate4_u16( } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_u32( @@ -7576,7 +7576,7 @@ pub fn svcreate4_u32( } #[doc = "Create a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcreate4[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svcreate4_u64( @@ -7589,7 +7589,7 @@ pub fn svcreate4_u64( } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvt))] @@ -7602,7 +7602,7 @@ pub fn 
svcvt_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvt))] @@ -7611,7 +7611,7 @@ pub fn svcvt_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvt))] @@ -7620,7 +7620,7 @@ pub fn svcvt_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvt))] @@ -7633,7 +7633,7 @@ pub fn svcvt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) -> } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvt))] @@ -7642,7 +7642,7 @@ pub fn svcvt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvt))] @@ -7651,7 +7651,7 @@ pub fn svcvt_f64_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7664,7 +7664,7 @@ pub fn svcvt_f32_s32_m(inactive: svfloat32_t, pg: svbool_t, op: svint32_t) -> sv } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7673,7 +7673,7 @@ pub fn svcvt_f32_s32_x(pg: svbool_t, op: svint32_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7682,7 +7682,7 @@ pub fn svcvt_f32_s32_z(pg: svbool_t, op: svint32_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7695,7 +7695,7 @@ pub fn svcvt_f32_s64_m(inactive: svfloat32_t, pg: svbool_t, op: svint64_t) -> sv } #[doc = "Floating-point convert"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7704,7 +7704,7 @@ pub fn svcvt_f32_s64_x(pg: svbool_t, op: svint64_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7713,7 +7713,7 @@ pub fn svcvt_f32_s64_z(pg: svbool_t, op: svint64_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7726,7 +7726,7 @@ pub fn svcvt_f32_u32_m(inactive: svfloat32_t, pg: svbool_t, op: svuint32_t) -> s } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7735,7 +7735,7 @@ pub fn svcvt_f32_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7744,7 +7744,7 @@ pub fn svcvt_f32_u32_z(pg: svbool_t, op: 
svuint32_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7757,7 +7757,7 @@ pub fn svcvt_f32_u64_m(inactive: svfloat32_t, pg: svbool_t, op: svuint64_t) -> s } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7766,7 +7766,7 @@ pub fn svcvt_f32_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f32[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7775,7 +7775,7 @@ pub fn svcvt_f32_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7788,7 +7788,7 @@ pub fn svcvt_f64_s32_m(inactive: svfloat64_t, pg: svbool_t, op: svint32_t) -> sv } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(scvtf))] @@ -7797,7 +7797,7 @@ pub fn svcvt_f64_s32_x(pg: svbool_t, op: svint32_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7806,7 +7806,7 @@ pub fn svcvt_f64_s32_z(pg: svbool_t, op: svint32_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7819,7 +7819,7 @@ pub fn svcvt_f64_s64_m(inactive: svfloat64_t, pg: svbool_t, op: svint64_t) -> sv } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7828,7 +7828,7 @@ pub fn svcvt_f64_s64_x(pg: svbool_t, op: svint64_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(scvtf))] @@ -7837,7 +7837,7 @@ pub fn svcvt_f64_s64_z(pg: svbool_t, op: svint64_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7850,7 +7850,7 @@ pub fn svcvt_f64_u32_m(inactive: svfloat64_t, pg: svbool_t, op: svuint32_t) -> s } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7859,7 +7859,7 @@ pub fn svcvt_f64_u32_x(pg: svbool_t, op: svuint32_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7868,7 +7868,7 @@ pub fn svcvt_f64_u32_z(pg: svbool_t, op: svuint32_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7881,7 +7881,7 @@ pub fn svcvt_f64_u64_m(inactive: svfloat64_t, pg: svbool_t, op: svuint64_t) -> s } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7890,7 +7890,7 @@ pub fn svcvt_f64_u64_x(pg: svbool_t, op: svuint64_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_f64[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ucvtf))] @@ -7899,7 +7899,7 @@ pub fn svcvt_f64_u64_z(pg: svbool_t, op: svuint64_t) -> svfloat64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -7912,7 +7912,7 @@ pub fn svcvt_s32_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -7921,7 +7921,7 @@ pub fn svcvt_s32_f32_x(pg: svbool_t, op: svfloat32_t) -> svint32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -7930,7 +7930,7 @@ pub fn svcvt_s32_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -7943,7 +7943,7 @@ pub fn svcvt_s32_f64_m(inactive: 
svint32_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -7952,7 +7952,7 @@ pub fn svcvt_s32_f64_x(pg: svbool_t, op: svfloat64_t) -> svint32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s32[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -7961,7 +7961,7 @@ pub fn svcvt_s32_f64_z(pg: svbool_t, op: svfloat64_t) -> svint32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -7974,7 +7974,7 @@ pub fn svcvt_s64_f32_m(inactive: svint64_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -7983,7 +7983,7 @@ pub fn svcvt_s64_f32_x(pg: svbool_t, op: svfloat32_t) -> svint64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(fcvtzs))] @@ -7992,7 +7992,7 @@ pub fn svcvt_s64_f32_z(pg: svbool_t, op: svfloat32_t) -> svint64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -8005,7 +8005,7 @@ pub fn svcvt_s64_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -8014,7 +8014,7 @@ pub fn svcvt_s64_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_s64[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzs))] @@ -8023,7 +8023,7 @@ pub fn svcvt_s64_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8036,7 +8036,7 @@ pub fn svcvt_u32_f32_m(inactive: svuint32_t, pg: svbool_t, op: svfloat32_t) -> s } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_x)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8045,7 +8045,7 @@ pub fn svcvt_u32_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8054,7 +8054,7 @@ pub fn svcvt_u32_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8067,7 +8067,7 @@ pub fn svcvt_u32_f64_m(inactive: svuint32_t, pg: svbool_t, op: svfloat64_t) -> s } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8076,7 +8076,7 @@ pub fn svcvt_u32_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u32[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8085,7 +8085,7 @@ pub fn svcvt_u32_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint32_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8098,7 +8098,7 @@ pub fn svcvt_u64_f32_m(inactive: svuint64_t, pg: svbool_t, op: svfloat32_t) -> s } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8107,7 +8107,7 @@ pub fn svcvt_u64_f32_x(pg: svbool_t, op: svfloat32_t) -> svuint64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8116,7 +8116,7 @@ pub fn svcvt_u64_f32_z(pg: svbool_t, op: svfloat32_t) -> svuint64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8129,7 +8129,7 @@ pub fn svcvt_u64_f64_m(inactive: svuint64_t, pg: svbool_t, op: svfloat64_t) -> s } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8138,7 +8138,7 @@ pub fn 
svcvt_u64_f64_x(pg: svbool_t, op: svfloat64_t) -> svuint64_t { } #[doc = "Floating-point convert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvt_u64[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtzu))] @@ -8147,7 +8147,7 @@ pub fn svcvt_u64_f64_z(pg: svbool_t, op: svfloat64_t) -> svuint64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8160,7 +8160,7 @@ pub fn svdiv_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8169,7 +8169,7 @@ pub fn svdiv_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8178,7 +8178,7 @@ pub fn svdiv_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ 
-8187,7 +8187,7 @@ pub fn svdiv_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8196,7 +8196,7 @@ pub fn svdiv_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8205,7 +8205,7 @@ pub fn svdiv_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8218,7 +8218,7 @@ pub fn svdiv_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8227,7 +8227,7 @@ pub fn svdiv_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(fdiv))] @@ -8236,7 +8236,7 @@ pub fn svdiv_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8245,7 +8245,7 @@ pub fn svdiv_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8254,7 +8254,7 @@ pub fn svdiv_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdiv))] @@ -8263,7 +8263,7 @@ pub fn svdiv_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8276,7 +8276,7 @@ pub fn svdiv_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8285,7 +8285,7 @@ pub fn svdiv_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8294,7 +8294,7 @@ pub fn svdiv_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8303,7 +8303,7 @@ pub fn svdiv_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8312,7 +8312,7 @@ pub fn svdiv_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8321,7 +8321,7 @@ pub fn svdiv_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8334,7 +8334,7 @@ pub fn svdiv_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8343,7 +8343,7 @@ pub fn svdiv_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8352,7 +8352,7 @@ pub fn svdiv_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8361,7 +8361,7 @@ pub fn svdiv_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8370,7 +8370,7 @@ pub fn svdiv_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdiv))] @@ -8379,7 +8379,7 @@ pub fn svdiv_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8392,7 +8392,7 @@ pub fn svdiv_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8401,7 +8401,7 @@ pub fn svdiv_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8410,7 +8410,7 @@ pub fn svdiv_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8419,7 +8419,7 @@ pub fn svdiv_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8428,7 +8428,7 @@ pub fn svdiv_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8437,7 +8437,7 @@ pub fn svdiv_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8450,7 +8450,7 @@ pub fn svdiv_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8459,7 +8459,7 @@ pub fn svdiv_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8468,7 +8468,7 @@ pub fn svdiv_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_x)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8477,7 +8477,7 @@ pub fn svdiv_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8486,7 +8486,7 @@ pub fn svdiv_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Divide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdiv[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udiv))] @@ -8495,7 +8495,7 @@ pub fn svdiv_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8508,7 +8508,7 @@ pub fn svdivr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8517,7 +8517,7 @@ pub fn svdivr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8526,7 +8526,7 @@ pub fn svdivr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8535,7 +8535,7 @@ pub fn svdivr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8544,7 +8544,7 @@ pub fn svdivr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8553,7 +8553,7 @@ pub fn svdivr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8566,7 +8566,7 @@ pub fn svdivr_f64_m(pg: svbool_t, op1: 
svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8575,7 +8575,7 @@ pub fn svdivr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8584,7 +8584,7 @@ pub fn svdivr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8593,7 +8593,7 @@ pub fn svdivr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fdivr))] @@ -8602,7 +8602,7 @@ pub fn svdivr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(fdivr))] @@ -8611,7 +8611,7 @@ pub fn svdivr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8624,7 +8624,7 @@ pub fn svdivr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8633,7 +8633,7 @@ pub fn svdivr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8642,7 +8642,7 @@ pub fn svdivr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8651,7 +8651,7 @@ pub fn svdivr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8660,7 +8660,7 @@ pub fn svdivr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8669,7 +8669,7 @@ pub fn svdivr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8682,7 +8682,7 @@ pub fn svdivr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8691,7 +8691,7 @@ pub fn svdivr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8700,7 +8700,7 @@ pub fn svdivr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8709,7 +8709,7 @@ pub fn svdivr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8718,7 +8718,7 @@ pub fn svdivr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdivr))] @@ -8727,7 +8727,7 @@ pub fn svdivr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8740,7 +8740,7 @@ pub fn svdivr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8749,7 +8749,7 @@ pub fn svdivr_n_u32_m(pg: svbool_t, op1: svuint32_t, 
op2: u32) -> svuint32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8758,7 +8758,7 @@ pub fn svdivr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8767,7 +8767,7 @@ pub fn svdivr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8776,7 +8776,7 @@ pub fn svdivr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8785,7 +8785,7 @@ pub fn svdivr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] 
@@ -8798,7 +8798,7 @@ pub fn svdivr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8807,7 +8807,7 @@ pub fn svdivr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8816,7 +8816,7 @@ pub fn svdivr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8825,7 +8825,7 @@ pub fn svdivr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8834,7 +8834,7 @@ pub fn svdivr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Divide reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdivr[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udivr))] @@ -8843,7 +8843,7 @@ pub fn svdivr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))] @@ -8869,7 +8869,7 @@ pub fn svdot_lane_s32( } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdot, IMM_INDEX = 0))] @@ -8895,7 +8895,7 @@ pub fn svdot_lane_s64( } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))] @@ -8923,7 +8923,7 @@ pub fn svdot_lane_u32( } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udot, IMM_INDEX = 0))] @@ -8951,7 +8951,7 @@ pub fn svdot_lane_u64( } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdot))] @@ -8964,7 +8964,7 @@ pub fn 
svdot_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdot))] @@ -8973,7 +8973,7 @@ pub fn svdot_n_s32(op1: svint32_t, op2: svint8_t, op3: i8) -> svint32_t { } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdot))] @@ -8986,7 +8986,7 @@ pub fn svdot_s64(op1: svint64_t, op2: svint16_t, op3: svint16_t) -> svint64_t { } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sdot))] @@ -8995,7 +8995,7 @@ pub fn svdot_n_s64(op1: svint64_t, op2: svint16_t, op3: i16) -> svint64_t { } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udot))] @@ -9008,7 +9008,7 @@ pub fn svdot_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udot))] @@ 
-9017,7 +9017,7 @@ pub fn svdot_n_u32(op1: svuint32_t, op2: svuint8_t, op3: u8) -> svuint32_t { } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udot))] @@ -9030,7 +9030,7 @@ pub fn svdot_u64(op1: svuint64_t, op2: svuint16_t, op3: svuint16_t) -> svuint64_ } #[doc = "Dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdot[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(udot))] @@ -9039,7 +9039,7 @@ pub fn svdot_n_u64(op1: svuint64_t, op2: svuint16_t, op3: u16) -> svuint64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9048,7 +9048,7 @@ pub fn svdup_lane_f32(data: svfloat32_t, index: u32) -> svfloat32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9057,7 +9057,7 @@ pub fn svdup_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9066,7 +9066,7 @@ pub fn svdup_lane_s8(data: svint8_t, index: u8) -> svint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9075,7 +9075,7 @@ pub fn svdup_lane_s16(data: svint16_t, index: u16) -> svint16_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9084,7 +9084,7 @@ pub fn svdup_lane_s32(data: svint32_t, index: u32) -> svint32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9093,7 +9093,7 @@ pub fn svdup_lane_s64(data: svint64_t, index: u64) -> svint64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9102,7 +9102,7 @@ pub fn svdup_lane_u8(data: svuint8_t, index: u8) -> svuint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9111,7 +9111,7 @@ pub fn svdup_lane_u16(data: svuint16_t, index: u16) -> svuint16_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9120,7 +9120,7 @@ pub fn svdup_lane_u32(data: svuint32_t, index: u32) -> svuint32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9129,7 +9129,7 @@ pub fn svdup_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbfx))] @@ -9143,7 +9143,7 @@ pub fn svdup_n_b8(op: bool) -> svbool_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbfx))] @@ -9157,7 +9157,7 @@ pub fn svdup_n_b16(op: bool) -> svbool_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbfx))] @@ -9171,7 +9171,7 @@ pub fn svdup_n_b32(op: bool) -> svbool_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbfx))] @@ -9185,7 +9185,7 @@ pub fn svdup_n_b64(op: bool) -> svbool_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9198,7 +9198,7 @@ pub fn svdup_n_f32(op: f32) -> svfloat32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9211,7 +9211,7 @@ pub fn svdup_n_f64(op: f64) -> svfloat64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9224,7 +9224,7 @@ pub fn svdup_n_s8(op: i8) -> svint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(mov))] @@ -9237,7 +9237,7 @@ pub fn svdup_n_s16(op: i16) -> svint16_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9250,7 +9250,7 @@ pub fn svdup_n_s32(op: i32) -> svint32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9263,7 +9263,7 @@ pub fn svdup_n_s64(op: i64) -> svint64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9272,7 +9272,7 @@ pub fn svdup_n_u8(op: u8) -> svuint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9281,7 +9281,7 @@ pub fn svdup_n_u16(op: u16) -> svuint16_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9290,7 +9290,7 @@ pub fn svdup_n_u32(op: u32) -> svuint32_t { 
} #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9299,7 +9299,7 @@ pub fn svdup_n_u64(op: u64) -> svuint64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9312,7 +9312,7 @@ pub fn svdup_n_f32_m(inactive: svfloat32_t, pg: svbool_t, op: f32) -> svfloat32_ } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9321,7 +9321,7 @@ pub fn svdup_n_f32_x(pg: svbool_t, op: f32) -> svfloat32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f32_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9330,7 +9330,7 @@ pub fn svdup_n_f32_z(pg: svbool_t, op: f32) -> svfloat32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9343,7 +9343,7 @@ pub fn svdup_n_f64_m(inactive: svfloat64_t, 
pg: svbool_t, op: f64) -> svfloat64_ } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9352,7 +9352,7 @@ pub fn svdup_n_f64_x(pg: svbool_t, op: f64) -> svfloat64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_f64_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9361,7 +9361,7 @@ pub fn svdup_n_f64_z(pg: svbool_t, op: f64) -> svfloat64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9374,7 +9374,7 @@ pub fn svdup_n_s8_m(inactive: svint8_t, pg: svbool_t, op: i8) -> svint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9383,7 +9383,7 @@ pub fn svdup_n_s8_x(pg: svbool_t, op: i8) -> svint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s8_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9392,7 +9392,7 @@ 
pub fn svdup_n_s8_z(pg: svbool_t, op: i8) -> svint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9405,7 +9405,7 @@ pub fn svdup_n_s16_m(inactive: svint16_t, pg: svbool_t, op: i16) -> svint16_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9414,7 +9414,7 @@ pub fn svdup_n_s16_x(pg: svbool_t, op: i16) -> svint16_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s16_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9423,7 +9423,7 @@ pub fn svdup_n_s16_z(pg: svbool_t, op: i16) -> svint16_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9436,7 +9436,7 @@ pub fn svdup_n_s32_m(inactive: svint32_t, pg: svbool_t, op: i32) -> svint32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(mov))] @@ -9445,7 +9445,7 @@ pub fn svdup_n_s32_x(pg: svbool_t, op: i32) -> svint32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s32_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9454,7 +9454,7 @@ pub fn svdup_n_s32_z(pg: svbool_t, op: i32) -> svint32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9467,7 +9467,7 @@ pub fn svdup_n_s64_m(inactive: svint64_t, pg: svbool_t, op: i64) -> svint64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9476,7 +9476,7 @@ pub fn svdup_n_s64_x(pg: svbool_t, op: i64) -> svint64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_s64_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9485,7 +9485,7 @@ pub fn svdup_n_s64_z(pg: svbool_t, op: i64) -> svint64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9494,7 +9494,7 @@ pub fn svdup_n_u8_m(inactive: svuint8_t, pg: svbool_t, op: u8) -> svuint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9503,7 +9503,7 @@ pub fn svdup_n_u8_x(pg: svbool_t, op: u8) -> svuint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u8_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9512,7 +9512,7 @@ pub fn svdup_n_u8_z(pg: svbool_t, op: u8) -> svuint8_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9521,7 +9521,7 @@ pub fn svdup_n_u16_m(inactive: svuint16_t, pg: svbool_t, op: u16) -> svuint16_t } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9530,7 +9530,7 @@ pub fn svdup_n_u16_x(pg: svbool_t, op: u16) -> svuint16_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u16_z)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9539,7 +9539,7 @@ pub fn svdup_n_u16_z(pg: svbool_t, op: u16) -> svuint16_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9548,7 +9548,7 @@ pub fn svdup_n_u32_m(inactive: svuint32_t, pg: svbool_t, op: u32) -> svuint32_t } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9557,7 +9557,7 @@ pub fn svdup_n_u32_x(pg: svbool_t, op: u32) -> svuint32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u32_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9566,7 +9566,7 @@ pub fn svdup_n_u32_z(pg: svbool_t, op: u32) -> svuint32_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9575,7 +9575,7 @@ pub fn svdup_n_u64_m(inactive: svuint64_t, pg: svbool_t, op: u64) -> svuint64_t } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9584,7 +9584,7 @@ pub fn svdup_n_u64_x(pg: svbool_t, op: u64) -> svuint64_t { } #[doc = "Broadcast a scalar value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdup[_n]_u64_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -9593,7 +9593,7 @@ pub fn svdup_n_u64_z(pg: svbool_t, op: u64) -> svuint64_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9609,7 +9609,7 @@ pub fn svdupq_lane_f32(data: svfloat32_t, index: u64) -> svfloat32_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9625,7 +9625,7 @@ pub fn svdupq_lane_f64(data: svfloat64_t, index: u64) -> svfloat64_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9641,7 +9641,7 @@ pub fn svdupq_lane_s8(data: svint8_t, index: u64) -> 
svint8_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9657,7 +9657,7 @@ pub fn svdupq_lane_s16(data: svint16_t, index: u64) -> svint16_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9673,7 +9673,7 @@ pub fn svdupq_lane_s32(data: svint32_t, index: u64) -> svint32_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9689,7 +9689,7 @@ pub fn svdupq_lane_s64(data: svint64_t, index: u64) -> svint64_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9698,7 +9698,7 @@ pub fn svdupq_lane_u8(data: svuint8_t, index: u64) -> svuint8_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(tbl))] @@ -9707,7 +9707,7 @@ pub fn svdupq_lane_u16(data: svuint16_t, index: u64) -> svuint16_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9716,7 +9716,7 @@ pub fn svdupq_lane_u32(data: svuint32_t, index: u64) -> svuint32_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -9725,7 +9725,7 @@ pub fn svdupq_lane_u64(data: svuint64_t, index: u64) -> svuint64_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_b16( @@ -9745,7 +9745,7 @@ pub fn svdupq_n_b16( } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_b32(x0: bool, x1: bool, x2: bool, x3: bool) -> svbool_t { @@ -9754,7 +9754,7 @@ pub fn svdupq_n_b32(x0: bool, x1: bool, x2: bool, x3: bool) -> svbool_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_b64(x0: bool, x1: bool) -> svbool_t { @@ -9763,7 +9763,7 @@ pub fn svdupq_n_b64(x0: bool, x1: bool) -> svbool_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_b8( @@ -9792,7 +9792,7 @@ pub fn svdupq_n_b8( } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t { @@ -9810,7 +9810,7 @@ pub fn svdupq_n_f32(x0: f32, x1: f32, x2: f32, x3: f32) -> svfloat32_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t { @@ -9828,7 +9828,7 @@ pub fn svdupq_n_s32(x0: i32, x1: i32, x2: i32, x3: i32) -> svint32_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_u32(x0: u32, x1: u32, x2: u32, x3: u32) -> svuint32_t { @@ -9844,7 +9844,7 @@ pub fn svdupq_n_u32(x0: u32, x1: u32, x2: u32, x3: u32) -> svuint32_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_f64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t { @@ -9862,7 +9862,7 @@ pub fn svdupq_n_f64(x0: f64, x1: f64) -> svfloat64_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t { @@ -9880,7 +9880,7 @@ pub fn svdupq_n_s64(x0: i64, x1: i64) -> svint64_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_u64(x0: u64, x1: u64) -> svuint64_t { @@ -9888,7 +9888,7 @@ pub fn svdupq_n_u64(x0: u64, x1: u64) -> svuint64_t { } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_s16( @@ -9919,7 +9919,7 @@ pub fn svdupq_n_s16( } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_u16( @@ -9948,7 +9948,7 @@ pub fn svdupq_n_u16( } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_s8( @@ -9989,7 +9989,7 @@ pub fn svdupq_n_s8( } #[doc = "Broadcast a quadword of scalars"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svdupq[_n]_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svdupq_n_u8( @@ -10034,7 +10034,7 @@ pub fn svdupq_n_u8( } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10047,7 +10047,7 @@ pub fn sveor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10060,7 +10060,7 @@ pub fn sveor_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10069,7 +10069,7 @@ pub fn sveor_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10078,7 +10078,7 @@ pub fn sveor_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10087,7 +10087,7 @@ pub fn sveor_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10096,7 +10096,7 @@ pub fn sveor_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10105,7 +10105,7 @@ pub fn sveor_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10118,7 +10118,7 @@ pub fn sveor_s16_m(pg: svbool_t, op1: svint16_t, op2: 
svint16_t) -> svint16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10127,7 +10127,7 @@ pub fn sveor_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10136,7 +10136,7 @@ pub fn sveor_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10145,7 +10145,7 @@ pub fn sveor_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10154,7 +10154,7 @@ pub fn sveor_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(eor))] @@ -10163,7 +10163,7 @@ pub fn sveor_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10176,7 +10176,7 @@ pub fn sveor_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10185,7 +10185,7 @@ pub fn sveor_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10194,7 +10194,7 @@ pub fn sveor_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10203,7 +10203,7 @@ pub fn sveor_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10212,7 +10212,7 @@ pub fn sveor_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10221,7 +10221,7 @@ pub fn sveor_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10234,7 +10234,7 @@ pub fn sveor_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10243,7 +10243,7 @@ pub fn sveor_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10252,7 +10252,7 @@ pub fn sveor_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10261,7 +10261,7 @@ pub fn sveor_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10270,7 +10270,7 @@ pub fn sveor_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10279,7 +10279,7 @@ pub fn sveor_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10288,7 +10288,7 @@ pub fn sveor_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10297,7 +10297,7 @@ pub fn sveor_n_u8_m(pg: svbool_t, op1: 
svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10306,7 +10306,7 @@ pub fn sveor_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10315,7 +10315,7 @@ pub fn sveor_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10324,7 +10324,7 @@ pub fn sveor_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10333,7 +10333,7 @@ pub fn sveor_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(eor))] @@ -10342,7 +10342,7 @@ pub fn sveor_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10351,7 +10351,7 @@ pub fn sveor_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10360,7 +10360,7 @@ pub fn sveor_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10369,7 +10369,7 @@ pub fn sveor_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10378,7 +10378,7 @@ pub fn sveor_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u16]_z)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10387,7 +10387,7 @@ pub fn sveor_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10396,7 +10396,7 @@ pub fn sveor_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10405,7 +10405,7 @@ pub fn sveor_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10414,7 +10414,7 @@ pub fn sveor_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10423,7 +10423,7 @@ pub fn sveor_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10432,7 +10432,7 @@ pub fn sveor_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10441,7 +10441,7 @@ pub fn sveor_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10450,7 +10450,7 @@ pub fn sveor_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10459,7 +10459,7 @@ pub fn sveor_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10468,7 +10468,7 @@ pub fn sveor_u64_x(pg: svbool_t, op1: 
svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10477,7 +10477,7 @@ pub fn sveor_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10486,7 +10486,7 @@ pub fn sveor_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor))] @@ -10495,7 +10495,7 @@ pub fn sveor_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise exclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorv))] @@ -10508,7 +10508,7 @@ pub fn sveorv_s8(pg: svbool_t, op: svint8_t) -> i8 { } #[doc = "Bitwise exclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(eorv))] @@ -10521,7 +10521,7 @@ pub fn sveorv_s16(pg: svbool_t, op: svint16_t) -> i16 { } #[doc = "Bitwise exclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorv))] @@ -10534,7 +10534,7 @@ pub fn sveorv_s32(pg: svbool_t, op: svint32_t) -> i32 { } #[doc = "Bitwise exclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorv))] @@ -10547,7 +10547,7 @@ pub fn sveorv_s64(pg: svbool_t, op: svint64_t) -> i64 { } #[doc = "Bitwise exclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorv))] @@ -10556,7 +10556,7 @@ pub fn sveorv_u8(pg: svbool_t, op: svuint8_t) -> u8 { } #[doc = "Bitwise exclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorv))] @@ -10565,7 +10565,7 @@ pub fn sveorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { } #[doc = "Bitwise exclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u32])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorv))] @@ -10574,7 +10574,7 @@ pub fn sveorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { } #[doc = "Bitwise exclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorv[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorv))] @@ -10583,7 +10583,7 @@ pub fn sveorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { } #[doc = "Floating-point exponential accelerator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fexpa))] @@ -10599,7 +10599,7 @@ pub fn svexpa_f32(op: svuint32_t) -> svfloat32_t { } #[doc = "Floating-point exponential accelerator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexpa[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fexpa))] @@ -10615,7 +10615,7 @@ pub fn svexpa_f64(op: svuint64_t) -> svfloat64_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10629,7 +10629,7 @@ pub fn svext_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10643,7 +10643,7 @@ pub fn svext_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10657,7 +10657,7 @@ pub fn svext_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10671,7 +10671,7 @@ pub fn svext_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10685,7 +10685,7 @@ pub fn svext_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10699,7 +10699,7 @@ pub fn 
svext_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10709,7 +10709,7 @@ pub fn svext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10719,7 +10719,7 @@ pub fn svext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10729,7 +10729,7 @@ pub fn svext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svext[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ext, IMM3 = 1))] @@ -10739,7 +10739,7 @@ pub fn svext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Sign-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtb))] @@ -10752,7 +10752,7 @@ pub fn svextb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16 } #[doc = "Sign-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtb))] @@ -10761,7 +10761,7 @@ pub fn svextb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Sign-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtb))] @@ -10770,7 +10770,7 @@ pub fn svextb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Sign-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtb))] @@ -10783,7 +10783,7 @@ pub fn svextb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32 } #[doc = "Sign-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtb))] @@ -10792,7 +10792,7 @@ pub fn svextb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Sign-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s32]_z)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtb))] @@ -10801,7 +10801,7 @@ pub fn svextb_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Sign-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxth))] @@ -10814,7 +10814,7 @@ pub fn svexth_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32 } #[doc = "Sign-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxth))] @@ -10823,7 +10823,7 @@ pub fn svexth_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Sign-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxth))] @@ -10832,7 +10832,7 @@ pub fn svexth_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Sign-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtb))] @@ -10845,7 +10845,7 @@ pub fn svextb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Sign-extend the low 8 bits"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtb))] @@ -10854,7 +10854,7 @@ pub fn svextb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Sign-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtb))] @@ -10863,7 +10863,7 @@ pub fn svextb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Sign-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxth))] @@ -10876,7 +10876,7 @@ pub fn svexth_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Sign-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxth))] @@ -10885,7 +10885,7 @@ pub fn svexth_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Sign-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxth))] @@ -10894,7 +10894,7 @@ pub fn svexth_s64_z(pg: svbool_t, op: svint64_t) 
-> svint64_t { } #[doc = "Sign-extend the low 32 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtw))] @@ -10907,7 +10907,7 @@ pub fn svextw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Sign-extend the low 32 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtw))] @@ -10916,7 +10916,7 @@ pub fn svextw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Sign-extend the low 32 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sxtw))] @@ -10925,7 +10925,7 @@ pub fn svextw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Zero-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtb))] @@ -10938,7 +10938,7 @@ pub fn svextb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuin } #[doc = "Zero-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uxtb))] @@ -10947,7 +10947,7 @@ pub fn svextb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Zero-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtb))] @@ -10956,7 +10956,7 @@ pub fn svextb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Zero-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtb))] @@ -10969,7 +10969,7 @@ pub fn svextb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuin } #[doc = "Zero-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtb))] @@ -10978,7 +10978,7 @@ pub fn svextb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Zero-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtb))] @@ -10987,7 +10987,7 @@ pub fn svextb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Zero-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxth))] @@ -11000,7 +11000,7 @@ pub fn svexth_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuin } #[doc = "Zero-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxth))] @@ -11009,7 +11009,7 @@ pub fn svexth_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Zero-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxth))] @@ -11018,7 +11018,7 @@ pub fn svexth_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Zero-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtb))] @@ -11031,7 +11031,7 @@ pub fn svextb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuin } #[doc = "Zero-extend the low 8 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtb))] @@ -11040,7 +11040,7 @@ pub fn svextb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Zero-extend the low 8 bits"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextb[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtb))] @@ -11049,7 +11049,7 @@ pub fn svextb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Zero-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxth))] @@ -11062,7 +11062,7 @@ pub fn svexth_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuin } #[doc = "Zero-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxth))] @@ -11071,7 +11071,7 @@ pub fn svexth_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Zero-extend the low 16 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svexth[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxth))] @@ -11080,7 +11080,7 @@ pub fn svexth_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Zero-extend the low 32 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtw))] @@ -11093,7 +11093,7 @@ pub fn svextw_u64_m(inactive: svuint64_t, 
pg: svbool_t, op: svuint64_t) -> svuin } #[doc = "Zero-extend the low 32 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtw))] @@ -11102,7 +11102,7 @@ pub fn svextw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Zero-extend the low 32 bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svextw[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uxtw))] @@ -11111,7 +11111,7 @@ pub fn svextw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_f32(tuple: svfloat32x2_t) -> svfloat32_t { @@ -11120,7 +11120,7 @@ pub fn svget2_f32(tuple: svfloat32x2_t) -> svfloat32_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_f64(tuple: svfloat64x2_t) -> svfloat64_t { @@ -11129,7 +11129,7 @@ pub fn svget2_f64(tuple: svfloat64x2_t) -> svfloat64_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_s8(tuple: svint8x2_t) -> svint8_t { @@ -11138,7 +11138,7 @@ pub fn svget2_s8(tuple: svint8x2_t) -> svint8_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_s16(tuple: svint16x2_t) -> svint16_t { @@ -11147,7 +11147,7 @@ pub fn svget2_s16(tuple: svint16x2_t) -> svint16_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_s32(tuple: svint32x2_t) -> svint32_t { @@ -11156,7 +11156,7 @@ pub fn svget2_s32(tuple: svint32x2_t) -> svint32_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_s64(tuple: svint64x2_t) -> svint64_t { @@ -11165,7 +11165,7 @@ pub fn svget2_s64(tuple: svint64x2_t) -> svint64_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_u8(tuple: svuint8x2_t) -> svuint8_t { @@ -11174,7 +11174,7 @@ pub fn svget2_u8(tuple: svuint8x2_t) -> svuint8_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_u16(tuple: svuint16x2_t) -> svuint16_t { @@ -11183,7 +11183,7 @@ pub fn svget2_u16(tuple: svuint16x2_t) -> svuint16_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_u32(tuple: svuint32x2_t) -> svuint32_t { @@ -11192,7 +11192,7 @@ pub fn svget2_u32(tuple: svuint32x2_t) -> svuint32_t { } #[doc = "Extract one vector from a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget2[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget2_u64(tuple: svuint64x2_t) -> svuint64_t { @@ -11201,7 +11201,7 @@ pub fn svget2_u64(tuple: svuint64x2_t) -> svuint64_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget3_f32(tuple: svfloat32x3_t) -> svfloat32_t { @@ -11210,7 +11210,7 @@ pub fn svget3_f32(tuple: svfloat32x3_t) -> svfloat32_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn 
svget3_f64(tuple: svfloat64x3_t) -> svfloat64_t { @@ -11219,7 +11219,7 @@ pub fn svget3_f64(tuple: svfloat64x3_t) -> svfloat64_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget3_s8(tuple: svint8x3_t) -> svint8_t { @@ -11228,7 +11228,7 @@ pub fn svget3_s8(tuple: svint8x3_t) -> svint8_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget3_s16(tuple: svint16x3_t) -> svint16_t { @@ -11237,7 +11237,7 @@ pub fn svget3_s16(tuple: svint16x3_t) -> svint16_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget3_s32(tuple: svint32x3_t) -> svint32_t { @@ -11246,7 +11246,7 @@ pub fn svget3_s32(tuple: svint32x3_t) -> svint32_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget3_s64(tuple: svint64x3_t) -> svint64_t { @@ -11255,7 +11255,7 @@ pub fn svget3_s64(tuple: svint64x3_t) -> svint64_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget3_u8(tuple: svuint8x3_t) -> svuint8_t { @@ -11264,7 +11264,7 @@ pub fn svget3_u8(tuple: svuint8x3_t) -> svuint8_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget3_u16(tuple: svuint16x3_t) -> svuint16_t { @@ -11273,7 +11273,7 @@ pub fn svget3_u16(tuple: svuint16x3_t) -> svuint16_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget3_u32(tuple: svuint32x3_t) -> svuint32_t { @@ -11282,7 +11282,7 @@ pub fn svget3_u32(tuple: svuint32x3_t) -> svuint32_t { } #[doc = "Extract one vector from a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget3[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget3_u64(tuple: svuint64x3_t) -> svuint64_t { @@ -11291,7 +11291,7 @@ pub fn svget3_u64(tuple: svuint64x3_t) -> svuint64_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_f32(tuple: 
svfloat32x4_t) -> svfloat32_t { @@ -11300,7 +11300,7 @@ pub fn svget4_f32(tuple: svfloat32x4_t) -> svfloat32_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_f64(tuple: svfloat64x4_t) -> svfloat64_t { @@ -11309,7 +11309,7 @@ pub fn svget4_f64(tuple: svfloat64x4_t) -> svfloat64_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_s8(tuple: svint8x4_t) -> svint8_t { @@ -11318,7 +11318,7 @@ pub fn svget4_s8(tuple: svint8x4_t) -> svint8_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_s16(tuple: svint16x4_t) -> svint16_t { @@ -11327,7 +11327,7 @@ pub fn svget4_s16(tuple: svint16x4_t) -> svint16_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_s32(tuple: svint32x4_t) -> svint32_t { @@ -11336,7 +11336,7 @@ pub fn svget4_s32(tuple: svint32x4_t) -> svint32_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_s64(tuple: svint64x4_t) -> svint64_t { @@ -11345,7 +11345,7 @@ pub fn svget4_s64(tuple: svint64x4_t) -> svint64_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_u8(tuple: svuint8x4_t) -> svuint8_t { @@ -11354,7 +11354,7 @@ pub fn svget4_u8(tuple: svuint8x4_t) -> svuint8_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_u16(tuple: svuint16x4_t) -> svuint16_t { @@ -11363,7 +11363,7 @@ pub fn svget4_u16(tuple: svuint16x4_t) -> svuint16_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_u32(tuple: svuint32x4_t) -> svuint32_t { @@ -11372,7 +11372,7 @@ pub fn svget4_u32(tuple: svuint32x4_t) -> svuint32_t { } #[doc = "Extract one vector from a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svget4[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svget4_u64(tuple: 
svuint64x4_t) -> svuint64_t { @@ -11381,7 +11381,7 @@ pub fn svget4_u64(tuple: svuint64x4_t) -> svuint64_t { } #[doc = "Create linear series"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(index))] @@ -11394,7 +11394,7 @@ pub fn svindex_s8(base: i8, step: i8) -> svint8_t { } #[doc = "Create linear series"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(index))] @@ -11407,7 +11407,7 @@ pub fn svindex_s16(base: i16, step: i16) -> svint16_t { } #[doc = "Create linear series"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(index))] @@ -11420,7 +11420,7 @@ pub fn svindex_s32(base: i32, step: i32) -> svint32_t { } #[doc = "Create linear series"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(index))] @@ -11433,7 +11433,7 @@ pub fn svindex_s64(base: i64, step: i64) -> svint64_t { } #[doc = "Create linear series"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(index))] @@ 
-11442,7 +11442,7 @@ pub fn svindex_u8(base: u8, step: u8) -> svuint8_t { } #[doc = "Create linear series"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(index))] @@ -11451,7 +11451,7 @@ pub fn svindex_u16(base: u16, step: u16) -> svuint16_t { } #[doc = "Create linear series"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(index))] @@ -11460,7 +11460,7 @@ pub fn svindex_u32(base: u32, step: u32) -> svuint32_t { } #[doc = "Create linear series"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svindex_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(index))] @@ -11469,7 +11469,7 @@ pub fn svindex_u64(base: u64, step: u64) -> svuint64_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(insr))] @@ -11482,7 +11482,7 @@ pub fn svinsr_n_f32(op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(insr))] @@ -11495,7 +11495,7 @@ pub fn svinsr_n_f64(op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(insr))] @@ -11508,7 +11508,7 @@ pub fn svinsr_n_s8(op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(insr))] @@ -11521,7 +11521,7 @@ pub fn svinsr_n_s16(op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(insr))] @@ -11534,7 +11534,7 @@ pub fn svinsr_n_s32(op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(insr))] @@ -11547,7 +11547,7 @@ pub fn svinsr_n_s64(op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(insr))] @@ -11556,7 +11556,7 @@ pub fn svinsr_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(insr))] @@ -11565,7 +11565,7 @@ pub fn svinsr_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(insr))] @@ -11574,7 +11574,7 @@ pub fn svinsr_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Insert scalar in shifted vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svinsr[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(insr))] @@ -11583,7 +11583,7 @@ pub fn svinsr_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11596,7 +11596,7 @@ pub fn svlasta_f32(pg: svbool_t, op: svfloat32_t) -> f32 { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_f64])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11609,7 +11609,7 @@ pub fn svlasta_f64(pg: svbool_t, op: svfloat64_t) -> f64 { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11622,7 +11622,7 @@ pub fn svlasta_s8(pg: svbool_t, op: svint8_t) -> i8 { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11635,7 +11635,7 @@ pub fn svlasta_s16(pg: svbool_t, op: svint16_t) -> i16 { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11648,7 +11648,7 @@ pub fn svlasta_s32(pg: svbool_t, op: svint32_t) -> i32 { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11661,7 +11661,7 @@ pub fn svlasta_s64(pg: svbool_t, op: svint64_t) -> i64 { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u8])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11670,7 +11670,7 @@ pub fn svlasta_u8(pg: svbool_t, op: svuint8_t) -> u8 { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11679,7 +11679,7 @@ pub fn svlasta_u16(pg: svbool_t, op: svuint16_t) -> u16 { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11688,7 +11688,7 @@ pub fn svlasta_u32(pg: svbool_t, op: svuint32_t) -> u32 { } #[doc = "Extract element after last"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlasta[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lasta))] @@ -11697,7 +11697,7 @@ pub fn svlasta_u64(pg: svbool_t, op: svuint64_t) -> u64 { } #[doc = "Extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11710,7 +11710,7 @@ pub fn svlastb_f32(pg: svbool_t, op: svfloat32_t) -> f32 { } #[doc = "Extract last element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11723,7 +11723,7 @@ pub fn svlastb_f64(pg: svbool_t, op: svfloat64_t) -> f64 { } #[doc = "Extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11736,7 +11736,7 @@ pub fn svlastb_s8(pg: svbool_t, op: svint8_t) -> i8 { } #[doc = "Extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11749,7 +11749,7 @@ pub fn svlastb_s16(pg: svbool_t, op: svint16_t) -> i16 { } #[doc = "Extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11762,7 +11762,7 @@ pub fn svlastb_s32(pg: svbool_t, op: svint32_t) -> i32 { } #[doc = "Extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11775,7 +11775,7 @@ pub fn svlastb_s64(pg: svbool_t, op: svint64_t) -> i64 { } #[doc = "Extract last element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11784,7 +11784,7 @@ pub fn svlastb_u8(pg: svbool_t, op: svuint8_t) -> u8 { } #[doc = "Extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11793,7 +11793,7 @@ pub fn svlastb_u16(pg: svbool_t, op: svuint16_t) -> u16 { } #[doc = "Extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11802,7 +11802,7 @@ pub fn svlastb_u32(pg: svbool_t, op: svuint32_t) -> u32 { } #[doc = "Extract last element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlastb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lastb))] @@ -11814,7 +11814,7 @@ pub fn svlastb_u64(pg: svbool_t, op: svuint64_t) -> u64 { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ 
-11830,7 +11830,7 @@ pub unsafe fn svld1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -11846,7 +11846,7 @@ pub unsafe fn svld1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -11862,7 +11862,7 @@ pub unsafe fn svld1_s8(pg: svbool_t, base: *const i8) -> svint8_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -11878,7 +11878,7 @@ pub unsafe fn svld1_s16(pg: svbool_t, base: *const i16) -> svint16_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address 
for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -11894,7 +11894,7 @@ pub unsafe fn svld1_s32(pg: svbool_t, base: *const i32) -> svint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -11910,7 +11910,7 @@ pub unsafe fn svld1_s64(pg: svbool_t, base: *const i64) -> svint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -11922,7 +11922,7 @@ pub unsafe fn svld1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -11934,7 +11934,7 @@ pub unsafe fn svld1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { #[doc = "## Safety"] #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -11946,7 +11946,7 @@ pub unsafe fn svld1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -11958,7 +11958,7 @@ pub unsafe fn svld1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -11985,7 +11985,7 @@ pub unsafe fn svld1_gather_s32index_f32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12012,7 +12012,7 @@ pub unsafe fn svld1_gather_s32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12028,7 +12028,7 @@ pub unsafe fn svld1_gather_s32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12055,7 +12055,7 @@ pub unsafe fn svld1_gather_s64index_f64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12082,7 +12082,7 @@ pub unsafe fn svld1_gather_s64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12098,7 +12098,7 @@ pub unsafe fn svld1_gather_s64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12125,7 +12125,7 @@ pub unsafe fn svld1_gather_u32index_f32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12152,7 +12152,7 @@ pub unsafe fn svld1_gather_u32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12168,7 +12168,7 @@ pub unsafe fn svld1_gather_u32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12184,7 +12184,7 @@ pub unsafe fn svld1_gather_u64index_f64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12200,7 +12200,7 @@ pub unsafe fn svld1_gather_u64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12216,7 +12216,7 @@ pub unsafe fn svld1_gather_u64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12243,7 +12243,7 @@ pub unsafe fn svld1_gather_s32offset_f32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be 
met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12270,7 +12270,7 @@ pub unsafe fn svld1_gather_s32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12286,7 +12286,7 @@ pub unsafe fn svld1_gather_s32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12313,7 +12313,7 @@ pub unsafe fn svld1_gather_s64offset_f64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12340,7 +12340,7 @@ pub unsafe fn svld1_gather_s64offset_s64( #[doc 
= "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12356,7 +12356,7 @@ pub unsafe fn svld1_gather_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12383,7 +12383,7 @@ pub unsafe fn svld1_gather_u32offset_f32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12410,7 +12410,7 @@ pub unsafe fn svld1_gather_u32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(ld1w))] @@ -12426,7 +12426,7 @@ pub unsafe fn svld1_gather_u32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12442,7 +12442,7 @@ pub unsafe fn svld1_gather_u64offset_f64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12458,7 +12458,7 @@ pub unsafe fn svld1_gather_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12475,7 +12475,7 @@ pub unsafe fn svld1_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack 
provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12488,7 +12488,7 @@ pub unsafe fn svld1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svflo #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12501,7 +12501,7 @@ pub unsafe fn svld1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svint #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12514,7 +12514,7 @@ pub unsafe fn svld1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svuin #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12527,7 +12527,7 @@ pub unsafe fn svld1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svflo #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12540,7 +12540,7 @@ pub unsafe fn svld1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svint #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12553,7 +12553,7 @@ pub unsafe fn svld1_gather_u64base_u64(pg: 
svbool_t, bases: svuint64_t) -> svuin #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12570,7 +12570,7 @@ pub unsafe fn svld1_gather_u32base_index_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12587,7 +12587,7 @@ pub unsafe fn svld1_gather_u32base_index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(ld1w))] @@ -12604,7 +12604,7 @@ pub unsafe fn svld1_gather_u32base_index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12621,7 +12621,7 @@ pub unsafe fn svld1_gather_u64base_index_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12638,7 +12638,7 @@ pub unsafe fn svld1_gather_u64base_index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12655,7 +12655,7 @@ pub unsafe fn svld1_gather_u64base_index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12683,7 +12683,7 @@ pub unsafe fn svld1_gather_u32base_offset_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12711,7 +12711,7 @@ pub unsafe fn svld1_gather_u32base_offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane 
before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12728,7 +12728,7 @@ pub unsafe fn svld1_gather_u32base_offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12756,7 +12756,7 @@ pub unsafe fn svld1_gather_u64base_offset_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12784,7 +12784,7 @@ pub unsafe fn svld1_gather_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a 
`usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12800,7 +12800,7 @@ pub unsafe fn svld1_gather_u64base_offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12812,7 +12812,7 @@ pub unsafe fn svld1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svflo #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12824,7 +12824,7 @@ pub unsafe fn svld1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svflo #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -12836,7 +12836,7 @@ pub unsafe fn svld1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8_ #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -12848,7 +12848,7 @@ pub unsafe fn svld1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12860,7 +12860,7 @@ pub unsafe fn svld1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12872,7 +12872,7 @@ pub unsafe fn svld1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -12884,7 +12884,7 @@ pub unsafe fn svld1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8 #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -12896,7 +12896,7 @@ pub unsafe fn svld1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -12908,7 +12908,7 @@ pub unsafe fn svld1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1d))] @@ -12920,7 +12920,7 @@ pub unsafe fn svld1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1row))] @@ -12936,7 +12936,7 @@ pub unsafe fn svld1ro_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rod))] @@ -12952,7 +12952,7 @@ pub unsafe fn svld1ro_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rob))] @@ -12968,7 +12968,7 @@ pub unsafe fn svld1ro_s8(pg: svbool_t, base: *const i8) -> svint8_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1roh))] @@ -12984,7 +12984,7 @@ pub unsafe fn svld1ro_s16(pg: svbool_t, base: *const i16) -> svint16_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1row))] @@ -13000,7 +13000,7 @@ pub unsafe fn svld1ro_s32(pg: svbool_t, base: *const i32) -> svint32_t { #[doc = "## 
Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rod))] @@ -13016,7 +13016,7 @@ pub unsafe fn svld1ro_s64(pg: svbool_t, base: *const i64) -> svint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rob))] @@ -13028,7 +13028,7 @@ pub unsafe fn svld1ro_u8(pg: svbool_t, base: *const u8) -> svuint8_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1roh))] @@ -13040,7 +13040,7 @@ pub unsafe fn svld1ro_u16(pg: svbool_t, base: *const u16) -> svuint16_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1row))] @@ -13052,7 +13052,7 @@ pub unsafe fn svld1ro_u32(pg: svbool_t, base: *const u32) -> svuint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rod))] @@ -13064,7 +13064,7 @@ pub unsafe fn svld1ro_u64(pg: svbool_t, base: *const u64) -> svuint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rqw))] @@ -13080,7 +13080,7 @@ pub unsafe fn svld1rq_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rqd))] @@ -13096,7 +13096,7 @@ pub unsafe fn svld1rq_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rqb))] @@ -13112,7 +13112,7 @@ pub unsafe fn svld1rq_s8(pg: svbool_t, base: *const i8) -> svint8_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rqh))] @@ -13128,7 +13128,7 @@ pub unsafe fn svld1rq_s16(pg: svbool_t, base: *const i16) -> svint16_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rqw))] @@ -13144,7 +13144,7 @@ pub unsafe fn svld1rq_s32(pg: svbool_t, base: *const i32) -> svint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] #[cfg_attr(test, assert_instr(ld1rqd))] @@ -13160,7 +13160,7 @@ pub unsafe fn svld1rq_s64(pg: svbool_t, base: *const i64) -> svint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rqb))] @@ -13172,7 +13172,7 @@ pub unsafe fn svld1rq_u8(pg: svbool_t, base: *const u8) -> svuint8_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rqh))] @@ -13184,7 +13184,7 @@ pub unsafe fn svld1rq_u16(pg: svbool_t, base: *const u16) -> svuint16_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rqw))] @@ -13196,7 +13196,7 @@ pub unsafe fn svld1rq_u32(pg: svbool_t, base: *const u32) -> svuint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1rqd))] @@ -13208,7 +13208,7 @@ pub unsafe fn svld1rq_u64(pg: svbool_t, base: *const u64) -> svuint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13235,7 +13235,7 @@ pub unsafe fn svld1sb_gather_s32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13262,7 +13262,7 @@ pub unsafe fn svld1sh_gather_s32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13278,7 +13278,7 @@ pub unsafe fn svld1sb_gather_s32offset_u32( #[doc = "## Safety"] #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13294,7 +13294,7 @@ pub unsafe fn svld1sh_gather_s32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13321,7 +13321,7 @@ pub unsafe fn svld1sb_gather_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13348,7 +13348,7 @@ pub unsafe fn svld1sh_gather_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(ld1sw))] @@ -13375,7 +13375,7 @@ pub unsafe fn svld1sw_gather_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13391,7 +13391,7 @@ pub unsafe fn svld1sb_gather_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13407,7 +13407,7 @@ pub unsafe fn svld1sh_gather_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -13423,7 +13423,7 @@ pub unsafe fn svld1sw_gather_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13454,7 +13454,7 @@ pub unsafe fn svld1sb_gather_u32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13485,7 +13485,7 @@ pub unsafe fn svld1sh_gather_u32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13501,7 +13501,7 @@ pub unsafe fn svld1sb_gather_u32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13517,7 +13517,7 @@ pub unsafe fn svld1sh_gather_u32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13533,7 +13533,7 @@ pub unsafe fn svld1sb_gather_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13549,7 +13549,7 @@ pub unsafe fn svld1sh_gather_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -13565,7 +13565,7 @@ pub unsafe fn svld1sw_gather_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13581,7 +13581,7 @@ pub unsafe fn svld1sb_gather_u64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met 
for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13597,7 +13597,7 @@ pub unsafe fn svld1sh_gather_u64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -13614,7 +13614,7 @@ pub unsafe fn svld1sw_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13646,7 +13646,7 @@ pub unsafe fn svld1sb_gather_u32base_offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a 
`usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13678,7 +13678,7 @@ pub unsafe fn svld1sh_gather_u32base_offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13695,7 +13695,7 @@ pub unsafe fn svld1sb_gather_u32base_offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13712,7 +13712,7 @@ pub unsafe fn svld1sh_gather_u32base_offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13744,7 +13744,7 @@ pub unsafe fn svld1sb_gather_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13776,7 +13776,7 @@ pub unsafe fn svld1sh_gather_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -13808,7 +13808,7 @@ pub unsafe fn svld1sw_gather_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences 
and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13825,7 +13825,7 @@ pub unsafe fn svld1sb_gather_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13842,7 +13842,7 @@ pub unsafe fn svld1sh_gather_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -13859,7 +13859,7 @@ pub unsafe fn svld1sw_gather_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13872,7 +13872,7 @@ pub unsafe fn svld1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13885,7 +13885,7 @@ pub unsafe fn svld1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13898,7 +13898,7 @@ pub 
unsafe fn svld1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13911,7 +13911,7 @@ pub unsafe fn svld1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13924,7 +13924,7 @@ pub unsafe fn svld1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13937,7 +13937,7 @@ pub unsafe fn svld1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -13950,7 +13950,7 @@ pub unsafe fn svld1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -13963,7 +13963,7 @@ pub unsafe fn svld1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
#[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -13976,7 +13976,7 @@ pub unsafe fn svld1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -13988,7 +13988,7 @@ pub unsafe fn svld1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14004,7 +14004,7 @@ pub unsafe fn svld1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14020,7 +14020,7 @@ pub unsafe fn svld1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14036,7 +14036,7 @@ pub unsafe fn svld1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14052,7 +14052,7 @@ pub unsafe fn svld1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14068,7 +14068,7 @@ pub unsafe fn svld1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { #[doc = "## Safety"] #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14084,7 +14084,7 @@ pub unsafe fn svld1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14096,7 +14096,7 @@ pub unsafe fn svld1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14108,7 +14108,7 @@ pub unsafe fn svld1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14120,7 +14120,7 @@ pub unsafe fn svld1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14132,7 +14132,7 @@ pub unsafe fn svld1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14144,7 +14144,7 @@ pub unsafe fn svld1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14156,7 +14156,7 @@ pub unsafe fn svld1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14168,7 +14168,7 @@ pub unsafe fn svld1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14180,7 +14180,7 @@ pub unsafe fn svld1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14192,7 +14192,7 @@ pub unsafe fn svld1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> svi #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14204,7 +14204,7 @@ pub unsafe fn svld1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14216,7 +14216,7 @@ pub unsafe fn svld1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> svi #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14228,7 +14228,7 @@ pub unsafe fn svld1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> svi #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14240,7 +14240,7 @@ pub unsafe fn svld1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14252,7 +14252,7 @@ pub unsafe fn svld1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14264,7 +14264,7 @@ pub unsafe fn svld1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sb))] @@ -14276,7 +14276,7 @@ pub unsafe fn svld1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14288,7 +14288,7 @@ pub unsafe fn svld1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14300,7 +14300,7 @@ pub unsafe fn svld1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14327,7 +14327,7 @@ pub unsafe fn svld1sh_gather_s32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14343,7 +14343,7 @@ pub unsafe fn svld1sh_gather_s32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14370,7 +14370,7 @@ pub unsafe fn svld1sh_gather_s64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14397,7 +14397,7 @@ pub unsafe fn svld1sw_gather_s64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14413,7 +14413,7 @@ pub unsafe fn svld1sh_gather_s64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14429,7 +14429,7 @@ pub unsafe fn svld1sw_gather_s64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14460,7 +14460,7 @@ pub unsafe fn svld1sh_gather_u32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14476,7 +14476,7 @@ pub unsafe fn svld1sh_gather_u32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14492,7 +14492,7 @@ pub unsafe fn svld1sh_gather_u64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14508,7 +14508,7 @@ pub unsafe fn svld1sw_gather_u64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14524,7 +14524,7 @@ pub unsafe fn svld1sh_gather_u64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14541,7 +14541,7 @@ pub unsafe fn svld1sw_gather_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14558,7 +14558,7 @@ pub unsafe fn svld1sh_gather_u32base_index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14575,7 +14575,7 @@ pub unsafe fn svld1sh_gather_u32base_index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14592,7 +14592,7 @@ pub unsafe fn svld1sh_gather_u64base_index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14609,7 +14609,7 @@ pub unsafe fn svld1sw_gather_u64base_index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sh))] @@ -14626,7 +14626,7 @@ pub unsafe fn svld1sh_gather_u64base_index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1sw))] @@ -14642,7 +14642,7 @@ pub unsafe fn svld1sw_gather_u64base_index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -14658,7 +14658,7 @@ pub unsafe fn svld1ub_gather_s32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -14674,7 +14674,7 @@ pub unsafe fn svld1uh_gather_s32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -14703,7 +14703,7 @@ pub unsafe fn svld1ub_gather_s32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -14732,7 +14732,7 @@ pub unsafe fn svld1uh_gather_s32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = 
" * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -14748,7 +14748,7 @@ pub unsafe fn svld1ub_gather_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -14764,7 +14764,7 @@ pub unsafe fn svld1uh_gather_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -14780,7 +14780,7 @@ pub unsafe fn svld1uw_gather_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -14809,7 +14809,7 @@ pub unsafe fn svld1ub_gather_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -14838,7 +14838,7 @@ pub unsafe fn svld1uh_gather_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -14867,7 +14867,7 @@ pub unsafe fn svld1uw_gather_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -14883,7 +14883,7 @@ pub unsafe fn svld1ub_gather_u32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -14899,7 +14899,7 @@ pub unsafe fn 
svld1uh_gather_u32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -14929,7 +14929,7 @@ pub unsafe fn svld1ub_gather_u32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -14959,7 +14959,7 @@ pub unsafe fn svld1uh_gather_u32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -14975,7 +14975,7 @@ pub unsafe fn svld1ub_gather_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -14991,7 +14991,7 @@ pub unsafe fn svld1uh_gather_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15007,7 +15007,7 @@ pub unsafe fn svld1uw_gather_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15023,7 +15023,7 @@ pub unsafe fn svld1ub_gather_u64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15039,7 +15039,7 @@ pub unsafe fn svld1uh_gather_u64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15056,7 +15056,7 @@ pub unsafe fn svld1uw_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15073,7 +15073,7 @@ pub unsafe fn svld1ub_gather_u32base_offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15090,7 +15090,7 @@ pub unsafe fn svld1uh_gather_u32base_offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a 
`usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15120,7 +15120,7 @@ pub unsafe fn svld1ub_gather_u32base_offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15150,7 +15150,7 @@ pub unsafe fn svld1uh_gather_u32base_offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15167,7 +15167,7 @@ pub unsafe fn svld1ub_gather_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = 
" * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15184,7 +15184,7 @@ pub unsafe fn svld1uh_gather_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15201,7 +15201,7 @@ pub unsafe fn svld1uw_gather_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15231,7 +15231,7 @@ pub unsafe fn svld1ub_gather_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15261,7 +15261,7 @@ pub unsafe fn svld1uh_gather_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15291,7 +15291,7 @@ pub unsafe fn svld1uw_gather_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15304,7 +15304,7 @@ pub unsafe fn svld1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be 
met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15317,7 +15317,7 @@ pub unsafe fn svld1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15330,7 +15330,7 @@ pub unsafe fn svld1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15343,7 
+15343,7 @@ pub unsafe fn svld1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15356,7 +15356,7 @@ pub unsafe fn svld1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15369,7 +15369,7 @@ pub unsafe fn svld1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15382,7 +15382,7 @@ pub unsafe fn svld1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15395,7 +15395,7 @@ pub unsafe fn svld1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15408,7 +15408,7 @@ pub unsafe fn svld1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = 
" * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15420,7 +15420,7 @@ pub unsafe fn svld1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15438,7 +15438,7 @@ pub unsafe fn svld1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15456,7 +15456,7 @@ pub unsafe fn svld1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(ld1h))] @@ -15474,7 +15474,7 @@ pub unsafe fn svld1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15492,7 +15492,7 @@ pub unsafe fn svld1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15510,7 +15510,7 @@ pub unsafe fn svld1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15528,7 +15528,7 @@ pub unsafe fn svld1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15540,7 +15540,7 @@ pub unsafe fn svld1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15552,7 +15552,7 @@ pub unsafe fn svld1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15564,7 +15564,7 @@ pub unsafe fn svld1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15576,7 +15576,7 @@ pub unsafe fn svld1ub_u64(pg: svbool_t, base: *const u8) -> 
svuint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15588,7 +15588,7 @@ pub unsafe fn svld1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15600,7 +15600,7 @@ pub unsafe fn svld1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15612,7 +15612,7 @@ pub unsafe fn svld1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15624,7 +15624,7 @@ pub unsafe fn svld1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15636,7 +15636,7 @@ pub unsafe fn svld1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> svi #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15648,7 +15648,7 @@ pub unsafe fn svld1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15660,7 +15660,7 @@ pub unsafe fn svld1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> svi #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15672,7 +15672,7 @@ pub unsafe fn svld1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> svi #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15684,7 +15684,7 @@ pub unsafe fn svld1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15696,7 +15696,7 @@ pub unsafe fn svld1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15708,7 +15708,7 @@ pub unsafe fn svld1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1b))] @@ -15720,7 +15720,7 @@ pub unsafe fn svld1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15732,7 +15732,7 @@ pub unsafe fn svld1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15744,7 +15744,7 @@ pub unsafe fn svld1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15760,7 +15760,7 @@ pub unsafe fn svld1uh_gather_s32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15789,7 +15789,7 @@ pub unsafe fn svld1uh_gather_s32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] 
@@ -15805,7 +15805,7 @@ pub unsafe fn svld1uh_gather_s64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15821,7 +15821,7 @@ pub unsafe fn svld1uw_gather_s64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15850,7 +15850,7 @@ pub unsafe fn svld1uh_gather_s64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15879,7 +15879,7 @@ pub unsafe fn svld1uw_gather_s64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15895,7 +15895,7 @@ pub unsafe fn svld1uh_gather_u32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15925,7 +15925,7 @@ pub unsafe fn svld1uh_gather_u32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15941,7 +15941,7 @@ pub unsafe fn svld1uh_gather_u64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15957,7 +15957,7 @@ pub unsafe fn svld1uw_gather_u64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -15973,7 +15973,7 @@ pub unsafe fn svld1uh_gather_u64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -15990,7 +15990,7 @@ pub unsafe fn svld1uw_gather_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -16007,7 +16007,7 @@ pub unsafe fn svld1uh_gather_u32base_index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -16024,7 +16024,7 @@ pub unsafe fn svld1uh_gather_u32base_index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -16041,7 +16041,7 @@ pub unsafe fn svld1uh_gather_u64base_index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -16058,7 +16058,7 @@ pub unsafe fn svld1uw_gather_u64base_index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each 
lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1h))] @@ -16075,7 +16075,7 @@ pub unsafe fn svld1uh_gather_u64base_index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld1w))] @@ -16091,7 +16091,7 @@ pub unsafe fn svld1uw_gather_u64base_index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2w))] @@ -16110,7 +16110,7 @@ pub unsafe fn svld2_f32(pg: svbool_t, base: *const f32) -> svfloat32x2_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2d))] @@ -16129,7 +16129,7 @@ pub 
unsafe fn svld2_f64(pg: svbool_t, base: *const f64) -> svfloat64x2_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2b))] @@ -16148,7 +16148,7 @@ pub unsafe fn svld2_s8(pg: svbool_t, base: *const i8) -> svint8x2_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2h))] @@ -16167,7 +16167,7 @@ pub unsafe fn svld2_s16(pg: svbool_t, base: *const i16) -> svint16x2_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2w))] @@ -16186,7 +16186,7 @@ pub unsafe fn svld2_s32(pg: svbool_t, base: *const i32) -> svint32x2_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active 
element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2d))] @@ -16205,7 +16205,7 @@ pub unsafe fn svld2_s64(pg: svbool_t, base: *const i64) -> svint64x2_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2b))] @@ -16217,7 +16217,7 @@ pub unsafe fn svld2_u8(pg: svbool_t, base: *const u8) -> svuint8x2_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2h))] @@ -16229,7 +16229,7 @@ pub unsafe fn svld2_u16(pg: svbool_t, base: *const u16) -> svuint16x2_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2w))] @@ -16241,7 +16241,7 @@ pub unsafe fn svld2_u32(pg: svbool_t, base: *const u32) -> svuint32x2_t { #[doc = "## Safety"] #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2d))] @@ -16253,7 +16253,7 @@ pub unsafe fn svld2_u64(pg: svbool_t, base: *const u64) -> svuint64x2_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2w))] @@ -16265,7 +16265,7 @@ pub unsafe fn svld2_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svflo #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2d))] @@ -16277,7 +16277,7 @@ pub unsafe fn svld2_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svflo #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2b))] @@ -16289,7 +16289,7 @@ pub unsafe fn svld2_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2h))] @@ -16301,7 +16301,7 @@ pub unsafe fn svld2_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2w))] @@ -16313,7 +16313,7 @@ pub unsafe fn svld2_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2d))] @@ -16325,7 +16325,7 @@ pub unsafe fn svld2_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints 
must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2b))] @@ -16337,7 +16337,7 @@ pub unsafe fn svld2_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8 #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2h))] @@ -16349,7 +16349,7 @@ pub unsafe fn svld2_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld2w))] @@ -16361,7 +16361,7 @@ pub unsafe fn svld2_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(ld2d))] @@ -16373,7 +16373,7 @@ pub unsafe fn svld2_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3w))] @@ -16392,7 +16392,7 @@ pub unsafe fn svld3_f32(pg: svbool_t, base: *const f32) -> svfloat32x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3d))] @@ -16411,7 +16411,7 @@ pub unsafe fn svld3_f64(pg: svbool_t, base: *const f64) -> svfloat64x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3b))] @@ -16430,7 +16430,7 @@ pub unsafe fn svld3_s8(pg: svbool_t, base: *const i8) -> svint8x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by 
`pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3h))] @@ -16449,7 +16449,7 @@ pub unsafe fn svld3_s16(pg: svbool_t, base: *const i16) -> svint16x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3w))] @@ -16468,7 +16468,7 @@ pub unsafe fn svld3_s32(pg: svbool_t, base: *const i32) -> svint32x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3d))] @@ -16487,7 +16487,7 @@ pub unsafe fn svld3_s64(pg: svbool_t, base: *const i64) -> svint64x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3b))] @@ -16499,7 +16499,7 @@ pub unsafe fn 
svld3_u8(pg: svbool_t, base: *const u8) -> svuint8x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3h))] @@ -16511,7 +16511,7 @@ pub unsafe fn svld3_u16(pg: svbool_t, base: *const u16) -> svuint16x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3w))] @@ -16523,7 +16523,7 @@ pub unsafe fn svld3_u32(pg: svbool_t, base: *const u32) -> svuint32x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3d))] @@ -16535,7 +16535,7 @@ pub unsafe fn svld3_u64(pg: svbool_t, base: *const u64) -> svuint64x3_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3w))] @@ -16547,7 +16547,7 @@ pub unsafe fn svld3_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svflo #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3d))] @@ -16559,7 +16559,7 @@ pub unsafe fn svld3_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svflo #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3b))] @@ -16571,7 +16571,7 @@ pub unsafe fn svld3_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3h))] @@ -16583,7 +16583,7 @@ pub unsafe fn svld3_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint #[doc = "## 
Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3w))] @@ -16595,7 +16595,7 @@ pub unsafe fn svld3_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3d))] @@ -16607,7 +16607,7 @@ pub unsafe fn svld3_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3b))] @@ -16619,7 +16619,7 @@ pub unsafe fn svld3_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8 #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3h))] @@ -16631,7 +16631,7 @@ pub unsafe fn svld3_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3w))] @@ -16643,7 +16643,7 @@ pub unsafe fn svld3_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld3d))] @@ -16655,7 +16655,7 @@ pub unsafe fn svld3_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4w))] @@ -16674,7 +16674,7 @@ pub unsafe fn svld4_f32(pg: svbool_t, base: *const f32) -> svfloat32x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4d))] @@ -16693,7 +16693,7 @@ pub unsafe fn svld4_f64(pg: svbool_t, base: *const f64) -> svfloat64x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4b))] @@ -16712,7 +16712,7 @@ pub unsafe fn svld4_s8(pg: svbool_t, base: *const i8) -> svint8x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4h))] @@ -16731,7 +16731,7 @@ pub unsafe fn svld4_s16(pg: svbool_t, base: *const i16) -> svint16x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(ld4w))] @@ -16750,7 +16750,7 @@ pub unsafe fn svld4_s32(pg: svbool_t, base: *const i32) -> svint32x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4d))] @@ -16769,7 +16769,7 @@ pub unsafe fn svld4_s64(pg: svbool_t, base: *const i64) -> svint64x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4b))] @@ -16781,7 +16781,7 @@ pub unsafe fn svld4_u8(pg: svbool_t, base: *const u8) -> svuint8x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4h))] @@ -16793,7 +16793,7 @@ pub unsafe fn svld4_u16(pg: svbool_t, base: *const u16) -> svuint16x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = 
" * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4w))] @@ -16805,7 +16805,7 @@ pub unsafe fn svld4_u32(pg: svbool_t, base: *const u32) -> svuint32x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4d))] @@ -16817,7 +16817,7 @@ pub unsafe fn svld4_u64(pg: svbool_t, base: *const u64) -> svuint64x4_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4w))] @@ -16829,7 +16829,7 @@ pub unsafe fn svld4_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svflo #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4d))] @@ -16841,7 +16841,7 @@ pub unsafe fn svld4_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svflo #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4b))] @@ -16853,7 +16853,7 @@ pub unsafe fn svld4_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint8x #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4h))] @@ -16865,7 +16865,7 @@ pub unsafe fn svld4_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4w))] @@ -16877,7 +16877,7 @@ pub unsafe fn svld4_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4d))] @@ -16889,7 +16889,7 @@ pub unsafe fn svld4_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4b))] @@ -16901,7 +16901,7 @@ pub unsafe fn svld4_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuint8 #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4h))] @@ -16913,7 +16913,7 @@ pub unsafe fn svld4_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4w))] @@ -16925,7 +16925,7 @@ pub unsafe fn svld4_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svuin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ld4d))] @@ -16938,7 +16938,7 @@ pub unsafe fn svld4_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svuin #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -16955,7 +16955,7 @@ pub unsafe fn svldff1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -16972,7 +16972,7 @@ pub unsafe fn svldff1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -16989,7 +16989,7 @@ pub unsafe fn svldff1_s8(pg: svbool_t, base: *const i8) -> svint8_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -17006,7 +17006,7 @@ pub unsafe fn svldff1_s16(pg: svbool_t, base: *const i16) -> svint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17023,7 +17023,7 @@ pub unsafe fn svldff1_s32(pg: svbool_t, base: *const i32) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17040,7 +17040,7 @@ pub unsafe fn svldff1_s64(pg: svbool_t, base: *const i64) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -17053,7 +17053,7 @@ pub unsafe fn svldff1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -17066,7 +17066,7 @@ pub unsafe fn svldff1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17079,7 +17079,7 @@ pub unsafe fn svldff1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17092,7 +17092,7 @@ pub unsafe fn svldff1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17120,7 +17120,7 @@ pub unsafe fn svldff1_gather_s32index_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17148,7 +17148,7 @@ pub unsafe fn svldff1_gather_s32index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17165,7 +17165,7 @@ pub unsafe fn svldff1_gather_s32index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17193,7 +17193,7 @@ pub unsafe fn svldff1_gather_s64index_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17221,7 +17221,7 @@ pub unsafe fn svldff1_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17238,7 +17238,7 @@ pub unsafe fn svldff1_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17266,7 +17266,7 @@ pub unsafe fn svldff1_gather_u32index_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17294,7 +17294,7 @@ pub unsafe fn svldff1_gather_u32index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17311,7 +17311,7 @@ pub unsafe fn svldff1_gather_u32index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17328,7 +17328,7 @@ pub unsafe fn svldff1_gather_u64index_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17345,7 +17345,7 @@ pub unsafe fn svldff1_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17362,7 +17362,7 @@ pub unsafe fn svldff1_gather_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17390,7 +17390,7 @@ pub unsafe fn svldff1_gather_s32offset_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17418,7 +17418,7 @@ pub unsafe fn svldff1_gather_s32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17435,7 +17435,7 @@ pub unsafe fn svldff1_gather_s32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17463,7 +17463,7 @@ pub unsafe fn svldff1_gather_s64offset_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17491,7 +17491,7 @@ pub unsafe fn svldff1_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17508,7 +17508,7 @@ pub unsafe fn svldff1_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17536,7 +17536,7 @@ pub unsafe fn svldff1_gather_u32offset_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17564,7 +17564,7 @@ pub unsafe fn svldff1_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17581,7 +17581,7 @@ pub unsafe fn svldff1_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17598,7 +17598,7 @@ pub unsafe fn svldff1_gather_u64offset_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17615,7 +17615,7 @@ pub unsafe fn svldff1_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17633,7 +17633,7 @@ pub unsafe fn svldff1_gather_u64offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17647,7 +17647,7 @@ pub unsafe fn svldff1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svf #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17661,7 +17661,7 @@ pub unsafe fn svldff1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svi #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17675,7 +17675,7 @@ pub unsafe fn svldff1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svu #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17689,7 +17689,7 @@ pub unsafe fn svldff1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svf #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17703,7 +17703,7 @@ pub unsafe fn svldff1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svi #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17717,7 +17717,7 @@ pub unsafe fn svldff1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svu #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17735,7 +17735,7 @@ pub unsafe fn svldff1_gather_u32base_index_f32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17753,7 +17753,7 @@ pub unsafe fn svldff1_gather_u32base_index_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17771,7 +17771,7 @@ pub unsafe fn svldff1_gather_u32base_index_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17789,7 +17789,7 @@ pub unsafe fn svldff1_gather_u64base_index_f64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17807,7 +17807,7 @@ pub unsafe fn svldff1_gather_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17825,7 +17825,7 @@ pub unsafe fn svldff1_gather_u64base_index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17854,7 +17854,7 @@ pub unsafe fn svldff1_gather_u32base_offset_f32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17883,7 +17883,7 @@ pub unsafe fn svldff1_gather_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17901,7 +17901,7 @@ pub unsafe fn svldff1_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17930,7 +17930,7 @@ pub unsafe fn svldff1_gather_u64base_offset_f64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17959,7 +17959,7 @@ pub unsafe fn svldff1_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -17976,7 +17976,7 @@ pub unsafe fn svldff1_gather_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -17989,7 +17989,7 @@ pub unsafe fn svldff1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svf #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -18002,7 +18002,7 @@ pub unsafe fn svldff1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svf #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -18015,7 +18015,7 @@ pub unsafe fn svldff1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -18028,7 +18028,7 @@ pub unsafe fn svldff1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -18041,7 +18041,7 @@ pub unsafe fn svldff1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -18054,7 +18054,7 @@ pub unsafe fn svldff1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -18067,7 +18067,7 @@ pub unsafe fn svldff1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuin #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -18080,7 +18080,7 @@ pub unsafe fn svldff1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -18093,7 +18093,7 @@ pub unsafe fn svldff1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1d))] @@ -18106,7 +18106,7 @@ pub unsafe fn svldff1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18138,7 +18138,7 @@ pub unsafe fn svldff1sb_gather_s32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18170,7 +18170,7 @@ pub unsafe fn svldff1sh_gather_s32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18187,7 +18187,7 @@ pub unsafe fn svldff1sb_gather_s32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18204,7 +18204,7 @@ pub unsafe fn svldff1sh_gather_s32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18236,7 +18236,7 @@ pub unsafe fn svldff1sb_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18268,7 +18268,7 @@ pub unsafe fn svldff1sh_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -18300,7 +18300,7 @@ pub unsafe fn svldff1sw_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18317,7 +18317,7 @@ pub unsafe fn svldff1sb_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18334,7 +18334,7 @@ pub unsafe fn svldff1sh_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -18351,7 +18351,7 @@ pub unsafe fn svldff1sw_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18383,7 +18383,7 @@ pub unsafe fn svldff1sb_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18415,7 +18415,7 @@ pub unsafe fn svldff1sh_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18432,7 +18432,7 @@ pub unsafe fn svldff1sb_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18449,7 +18449,7 @@ pub unsafe fn svldff1sh_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18466,7 +18466,7 @@ pub unsafe fn svldff1sb_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18483,7 +18483,7 @@ pub unsafe fn svldff1sh_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -18500,7 +18500,7 @@ pub unsafe fn svldff1sw_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18517,7 +18517,7 @@ pub unsafe fn svldff1sb_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18534,7 +18534,7 @@ pub unsafe fn svldff1sh_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -18552,7 +18552,7 @@ pub unsafe fn svldff1sw_gather_u64offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18585,7 +18585,7 @@ pub unsafe fn svldff1sb_gather_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18618,7 +18618,7 @@ pub unsafe fn svldff1sh_gather_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18636,7 +18636,7 @@ pub unsafe fn svldff1sb_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18654,7 +18654,7 @@ pub unsafe fn svldff1sh_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18687,7 +18687,7 @@ pub unsafe fn svldff1sb_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18720,7 +18720,7 @@ pub unsafe fn svldff1sh_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -18753,7 +18753,7 @@ pub unsafe fn svldff1sw_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18771,7 +18771,7 @@ pub unsafe fn svldff1sb_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18789,7 +18789,7 @@ pub unsafe fn svldff1sh_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -18807,7 +18807,7 @@ pub unsafe fn svldff1sw_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18821,7 +18821,7 @@ pub unsafe fn svldff1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18835,7 +18835,7 @@ pub unsafe fn svldff1sh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18849,7 +18849,7 @@ pub unsafe fn svldff1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18863,7 +18863,7 @@ pub unsafe fn svldff1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18877,7 +18877,7 @@ pub unsafe fn svldff1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18891,7 +18891,7 @@ pub unsafe fn svldff1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -18905,7 +18905,7 @@ pub unsafe fn svldff1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18919,7 +18919,7 @@ pub unsafe fn svldff1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18933,7 +18933,7 @@ pub unsafe fn svldff1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -18946,7 +18946,7 @@ pub unsafe fn svldff1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18963,7 +18963,7 @@ pub unsafe fn svldff1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -18980,7 +18980,7 @@ pub unsafe fn svldff1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -18997,7 +18997,7 @@ pub unsafe fn svldff1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19014,7 +19014,7 @@ pub unsafe fn svldff1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19031,7 +19031,7 @@ pub unsafe fn svldff1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19048,7 +19048,7 @@ pub unsafe fn svldff1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19061,7 +19061,7 @@ pub unsafe fn svldff1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19074,7 +19074,7 @@ pub unsafe fn svldff1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19087,7 +19087,7 @@ pub unsafe fn svldff1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19100,7 +19100,7 @@ pub unsafe fn svldff1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19113,7 +19113,7 @@ pub unsafe fn svldff1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19126,7 +19126,7 @@ pub unsafe fn svldff1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19139,7 +19139,7 @@ pub unsafe fn svldff1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19152,7 +19152,7 @@ pub unsafe fn svldff1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19165,7 +19165,7 @@ pub unsafe fn svldff1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19178,7 +19178,7 @@ pub unsafe fn svldff1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19191,7 +19191,7 @@ pub unsafe fn svldff1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19204,7 +19204,7 @@ pub unsafe fn svldff1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19217,7 +19217,7 @@ pub unsafe fn svldff1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19230,7 +19230,7 @@ pub unsafe fn svldff1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19243,7 +19243,7 @@ pub unsafe fn svldff1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sb))] @@ -19256,7 +19256,7 @@ pub unsafe fn svldff1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19269,7 +19269,7 @@ pub unsafe fn svldff1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19282,7 +19282,7 @@ pub unsafe fn svldff1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19310,7 +19310,7 @@ pub unsafe fn svldff1sh_gather_s32index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19327,7 +19327,7 @@ pub unsafe fn svldff1sh_gather_s32index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19355,7 +19355,7 @@ pub unsafe fn svldff1sh_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19383,7 +19383,7 @@ pub unsafe fn svldff1sw_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19400,7 +19400,7 @@ pub unsafe fn svldff1sh_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19417,7 +19417,7 @@ pub unsafe fn svldff1sw_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19449,7 +19449,7 @@ pub unsafe fn svldff1sh_gather_u32index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19466,7 +19466,7 @@ pub unsafe fn svldff1sh_gather_u32index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19483,7 +19483,7 @@ pub unsafe fn svldff1sh_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19500,7 +19500,7 @@ pub unsafe fn svldff1sw_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19517,7 +19517,7 @@ pub unsafe fn svldff1sh_gather_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19535,7 +19535,7 @@ pub unsafe fn svldff1sw_gather_u64index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19553,7 +19553,7 @@ pub unsafe fn svldff1sh_gather_u32base_index_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19571,7 +19571,7 @@ pub unsafe fn svldff1sh_gather_u32base_index_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19589,7 +19589,7 @@ pub unsafe fn svldff1sh_gather_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19607,7 +19607,7 @@ pub unsafe fn svldff1sw_gather_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sh))] @@ -19625,7 +19625,7 @@ pub unsafe fn svldff1sh_gather_u64base_index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1sw))] @@ -19642,7 +19642,7 @@ pub unsafe fn svldff1sw_gather_u64base_index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -19659,7 +19659,7 @@ pub unsafe fn svldff1ub_gather_s32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -19676,7 +19676,7 @@ pub unsafe fn svldff1uh_gather_s32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -19706,7 +19706,7 @@ pub unsafe fn svldff1ub_gather_s32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -19736,7 +19736,7 @@ pub unsafe fn svldff1uh_gather_s32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -19753,7 +19753,7 @@ pub unsafe fn svldff1ub_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -19770,7 +19770,7 @@ pub unsafe fn svldff1uh_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -19787,7 +19787,7 @@ pub unsafe fn svldff1uw_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -19817,7 +19817,7 @@ pub unsafe fn svldff1ub_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -19847,7 +19847,7 @@ pub unsafe fn svldff1uh_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -19877,7 +19877,7 @@ pub unsafe fn svldff1uw_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -19894,7 +19894,7 @@ pub unsafe fn svldff1ub_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -19911,7 +19911,7 @@ pub unsafe fn svldff1uh_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -19942,7 +19942,7 @@ pub unsafe fn svldff1ub_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -19973,7 +19973,7 @@ pub unsafe fn svldff1uh_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -19990,7 +19990,7 @@ pub unsafe fn svldff1ub_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20007,7 +20007,7 @@ pub unsafe fn svldff1uh_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20024,7 +20024,7 @@ pub unsafe fn svldff1uw_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20041,7 +20041,7 @@ pub unsafe fn svldff1ub_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20058,7 +20058,7 @@ pub unsafe fn svldff1uh_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20076,7 +20076,7 @@ pub unsafe fn svldff1uw_gather_u64offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20094,7 +20094,7 @@ pub unsafe fn svldff1ub_gather_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20112,7 +20112,7 @@ pub unsafe fn svldff1uh_gather_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20144,7 +20144,7 @@ pub unsafe fn svldff1ub_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20176,7 +20176,7 @@ pub unsafe fn svldff1uh_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20194,7 +20194,7 @@ pub unsafe fn svldff1ub_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20212,7 +20212,7 @@ pub unsafe fn svldff1uh_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20230,7 +20230,7 @@ pub unsafe fn svldff1uw_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20262,7 +20262,7 @@ pub unsafe fn svldff1ub_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20294,7 +20294,7 @@ pub unsafe fn svldff1uh_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20326,7 +20326,7 @@ pub unsafe fn svldff1uw_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20340,7 +20340,7 @@ pub unsafe fn svldff1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20354,7 +20354,7 @@ pub unsafe fn svldff1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20368,7 +20368,7 @@ pub unsafe fn svldff1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20382,7 +20382,7 @@ pub unsafe fn svldff1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20396,7 +20396,7 @@ pub unsafe fn svldff1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20410,7 +20410,7 @@ pub unsafe fn svldff1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20424,7 +20424,7 @@ pub unsafe fn svldff1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20438,7 +20438,7 @@ pub unsafe fn svldff1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20452,7 +20452,7 @@ pub unsafe fn svldff1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20465,7 +20465,7 @@ pub unsafe fn svldff1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20484,7 +20484,7 @@ pub unsafe fn svldff1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20503,7 +20503,7 @@ pub unsafe fn svldff1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20522,7 +20522,7 @@ pub unsafe fn svldff1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20541,7 +20541,7 @@ pub unsafe fn svldff1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20560,7 +20560,7 @@ pub unsafe fn svldff1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20579,7 +20579,7 @@ pub unsafe fn svldff1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20592,7 +20592,7 @@ pub unsafe fn svldff1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20605,7 +20605,7 @@ pub unsafe fn svldff1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20618,7 +20618,7 @@ pub unsafe fn svldff1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20631,7 +20631,7 @@ pub unsafe fn svldff1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20644,7 +20644,7 @@ pub unsafe fn svldff1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20657,7 +20657,7 @@ pub unsafe fn svldff1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20670,7 +20670,7 @@ pub unsafe fn svldff1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20683,7 +20683,7 @@ pub unsafe fn svldff1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20696,7 +20696,7 @@ pub unsafe fn svldff1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20709,7 +20709,7 @@ pub unsafe fn svldff1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20722,7 +20722,7 @@ pub unsafe fn svldff1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20735,7 +20735,7 @@ pub unsafe fn svldff1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20748,7 +20748,7 @@ pub unsafe fn svldff1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20761,7 +20761,7 @@ pub unsafe fn svldff1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20774,7 +20774,7 @@ pub unsafe fn svldff1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1b))] @@ -20787,7 +20787,7 @@ pub unsafe fn svldff1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20800,7 +20800,7 @@ pub unsafe fn svldff1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20813,7 +20813,7 @@ pub unsafe fn svldff1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20830,7 +20830,7 @@ pub unsafe fn svldff1uh_gather_s32index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20860,7 +20860,7 @@ pub unsafe fn svldff1uh_gather_s32index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20877,7 +20877,7 @@ pub unsafe fn svldff1uh_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20894,7 +20894,7 @@ pub unsafe fn svldff1uw_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20924,7 +20924,7 @@ pub unsafe fn svldff1uh_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -20954,7 +20954,7 @@ pub unsafe fn svldff1uw_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -20971,7 +20971,7 @@ pub unsafe fn svldff1uh_gather_u32index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -21002,7 +21002,7 @@ pub unsafe fn svldff1uh_gather_u32index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -21019,7 +21019,7 @@ pub unsafe fn svldff1uh_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -21036,7 +21036,7 @@ pub unsafe fn svldff1uw_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -21053,7 +21053,7 @@ pub unsafe fn svldff1uh_gather_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -21071,7 +21071,7 @@ pub unsafe fn svldff1uw_gather_u64index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -21089,7 +21089,7 @@ pub unsafe fn svldff1uh_gather_u32base_index_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -21107,7 +21107,7 @@ pub unsafe fn svldff1uh_gather_u32base_index_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -21125,7 +21125,7 @@ pub unsafe fn svldff1uh_gather_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -21143,7 +21143,7 @@ pub unsafe fn svldff1uw_gather_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1h))] @@ -21161,7 +21161,7 @@ pub unsafe fn svldff1uh_gather_u64base_index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and first-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldff1w))] @@ -21178,7 +21178,7 @@ pub unsafe fn svldff1uw_gather_u64base_index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -21195,7 +21195,7 @@ pub unsafe fn svldnf1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1d))] @@ -21212,7 +21212,7 @@ pub unsafe fn svldnf1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21229,7 +21229,7 @@ pub unsafe fn svldnf1_s8(pg: svbool_t, base: *const i8) -> svint8_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -21246,7 +21246,7 @@ pub unsafe fn svldnf1_s16(pg: svbool_t, base: *const i16) -> svint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -21263,7 +21263,7 @@ pub unsafe fn svldnf1_s32(pg: svbool_t, base: *const i32) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1d))] @@ -21280,7 +21280,7 @@ pub unsafe fn svldnf1_s64(pg: svbool_t, base: *const i64) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21293,7 +21293,7 @@ pub unsafe fn svldnf1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -21306,7 +21306,7 @@ pub unsafe fn svldnf1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -21319,7 +21319,7 @@ pub unsafe fn svldnf1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1d))] @@ -21332,7 +21332,7 @@ pub unsafe fn svldnf1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -21345,7 +21345,7 @@ pub unsafe fn svldnf1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svf #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1d))] @@ -21358,7 +21358,7 @@ pub unsafe fn svldnf1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svf #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21371,7 +21371,7 @@ pub unsafe fn svldnf1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -21384,7 +21384,7 @@ pub unsafe fn svldnf1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -21397,7 +21397,7 @@ pub unsafe fn svldnf1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1d))] @@ -21410,7 +21410,7 @@ pub unsafe fn svldnf1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21423,7 +21423,7 @@ pub unsafe fn svldnf1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuin #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -21436,7 +21436,7 @@ pub unsafe fn svldnf1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -21449,7 +21449,7 @@ pub unsafe fn svldnf1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1d))] @@ -21462,7 +21462,7 @@ pub unsafe fn svldnf1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21479,7 +21479,7 @@ pub unsafe fn svldnf1sb_s16(pg: svbool_t, base: *const i8) -> svint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21496,7 +21496,7 @@ pub unsafe fn svldnf1sb_s32(pg: svbool_t, base: *const i8) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sh))] @@ -21513,7 +21513,7 @@ pub unsafe fn svldnf1sh_s32(pg: svbool_t, base: *const i16) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21530,7 +21530,7 @@ pub unsafe fn svldnf1sb_s64(pg: svbool_t, base: *const i8) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sh))] @@ -21547,7 +21547,7 @@ pub unsafe fn svldnf1sh_s64(pg: svbool_t, base: *const i16) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sw))] @@ -21564,7 +21564,7 @@ pub unsafe fn svldnf1sw_s64(pg: svbool_t, base: *const i32) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21577,7 +21577,7 @@ pub unsafe fn svldnf1sb_u16(pg: svbool_t, base: *const i8) -> svuint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21590,7 +21590,7 @@ pub unsafe fn svldnf1sb_u32(pg: svbool_t, base: *const i8) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sh))] @@ -21603,7 +21603,7 @@ pub unsafe fn svldnf1sh_u32(pg: svbool_t, base: *const i16) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21616,7 +21616,7 @@ pub unsafe fn svldnf1sb_u64(pg: svbool_t, base: *const i8) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sh))] @@ -21629,7 +21629,7 @@ pub unsafe fn svldnf1sh_u64(pg: svbool_t, base: *const i16) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sw))] @@ -21642,7 +21642,7 @@ pub unsafe fn svldnf1sw_u64(pg: svbool_t, base: *const i32) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21655,7 +21655,7 @@ pub unsafe fn svldnf1sb_vnum_s16(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21668,7 +21668,7 @@ pub unsafe fn svldnf1sb_vnum_s32(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sh))] @@ -21681,7 +21681,7 @@ pub unsafe fn svldnf1sh_vnum_s32(pg: svbool_t, base: *const i16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21694,7 +21694,7 @@ pub unsafe fn svldnf1sb_vnum_s64(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sh))] @@ -21707,7 +21707,7 @@ pub unsafe fn svldnf1sh_vnum_s64(pg: svbool_t, base: *const i16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sw))] @@ -21720,7 +21720,7 @@ pub unsafe fn svldnf1sw_vnum_s64(pg: svbool_t, base: *const i32, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21733,7 +21733,7 @@ pub unsafe fn svldnf1sb_vnum_u16(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21746,7 +21746,7 @@ pub unsafe fn svldnf1sb_vnum_u32(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sh))] @@ -21759,7 +21759,7 @@ pub unsafe fn svldnf1sh_vnum_u32(pg: svbool_t, base: *const i16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sb))] @@ -21772,7 +21772,7 @@ pub unsafe fn svldnf1sb_vnum_u64(pg: svbool_t, base: *const i8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sh))] @@ -21785,7 +21785,7 @@ pub unsafe fn svldnf1sh_vnum_u64(pg: svbool_t, base: *const i16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1sw))] @@ -21798,7 +21798,7 @@ pub unsafe fn svldnf1sw_vnum_u64(pg: svbool_t, base: *const i32, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21817,7 +21817,7 @@ pub unsafe fn svldnf1ub_s16(pg: svbool_t, base: *const u8) -> svint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21836,7 +21836,7 @@ pub unsafe fn svldnf1ub_s32(pg: svbool_t, base: *const u8) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -21855,7 +21855,7 @@ pub unsafe fn svldnf1uh_s32(pg: svbool_t, base: *const u16) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21874,7 +21874,7 @@ pub unsafe fn svldnf1ub_s64(pg: svbool_t, base: *const u8) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -21893,7 +21893,7 @@ pub unsafe fn svldnf1uh_s64(pg: svbool_t, base: *const u16) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -21912,7 +21912,7 @@ pub unsafe fn svldnf1uw_s64(pg: svbool_t, base: *const u32) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21925,7 +21925,7 @@ pub unsafe fn svldnf1ub_u16(pg: svbool_t, base: *const u8) -> svuint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21938,7 +21938,7 @@ pub unsafe fn svldnf1ub_u32(pg: svbool_t, base: *const u8) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -21951,7 +21951,7 @@ pub unsafe fn svldnf1uh_u32(pg: svbool_t, base: *const u16) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -21964,7 +21964,7 @@ pub unsafe fn svldnf1ub_u64(pg: svbool_t, base: *const u8) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -21977,7 +21977,7 @@ pub unsafe fn svldnf1uh_u64(pg: svbool_t, base: *const u16) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -21990,7 +21990,7 @@ pub unsafe fn svldnf1uw_u64(pg: svbool_t, base: *const u32) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -22003,7 +22003,7 @@ pub unsafe fn svldnf1ub_vnum_s16(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -22016,7 +22016,7 @@ pub unsafe fn svldnf1ub_vnum_s32(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -22029,7 +22029,7 @@ pub unsafe fn svldnf1uh_vnum_s32(pg: svbool_t, base: *const u16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -22042,7 +22042,7 @@ pub unsafe fn svldnf1ub_vnum_s64(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -22055,7 +22055,7 @@ pub unsafe fn svldnf1uh_vnum_s64(pg: svbool_t, base: *const u16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -22068,7 +22068,7 @@ pub unsafe fn svldnf1uw_vnum_s64(pg: svbool_t, base: *const u32, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -22081,7 +22081,7 @@ pub unsafe fn svldnf1ub_vnum_u16(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -22094,7 +22094,7 @@ pub unsafe fn svldnf1ub_vnum_u32(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -22107,7 +22107,7 @@ pub unsafe fn svldnf1uh_vnum_u32(pg: svbool_t, base: *const u16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1b))] @@ -22120,7 +22120,7 @@ pub unsafe fn svldnf1ub_vnum_u64(pg: svbool_t, base: *const u8, vnum: i64) -> sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1h))] @@ -22133,7 +22133,7 @@ pub unsafe fn svldnf1uh_vnum_u64(pg: svbool_t, base: *const u16, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`, the first-fault register (`FFR`) and non-faulting behaviour)."] #[doc = " * Result lanes corresponding to inactive FFR lanes (either before or as a result of this intrinsic) have \"CONSTRAINED UNPREDICTABLE\" values, irrespective of predication. 
Refer to architectural documentation for details."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnf1w))] @@ -22146,7 +22146,7 @@ pub unsafe fn svldnf1uw_vnum_u64(pg: svbool_t, base: *const u32, vnum: i64) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -22163,7 +22163,7 @@ pub unsafe fn svldnt1_f32(pg: svbool_t, base: *const f32) -> svfloat32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -22180,7 +22180,7 @@ pub unsafe fn svldnt1_f64(pg: svbool_t, base: *const f64) -> svfloat64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the 
address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -22197,7 +22197,7 @@ pub unsafe fn svldnt1_s8(pg: svbool_t, base: *const i8) -> svint8_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -22214,7 +22214,7 @@ pub unsafe fn svldnt1_s16(pg: svbool_t, base: *const i16) -> svint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -22231,7 +22231,7 @@ pub unsafe fn svldnt1_s32(pg: svbool_t, base: *const i32) -> svint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -22248,7 +22248,7 @@ pub unsafe fn svldnt1_s64(pg: svbool_t, base: *const i64) -> svint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -22261,7 +22261,7 @@ pub unsafe fn svldnt1_u8(pg: svbool_t, base: *const u8) -> svuint8_t { #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -22274,7 +22274,7 @@ pub unsafe fn svldnt1_u16(pg: svbool_t, base: *const u16) -> svuint16_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -22287,7 +22287,7 @@ pub unsafe fn svldnt1_u32(pg: svbool_t, base: *const u32) -> svuint32_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -22300,7 +22300,7 @@ pub unsafe fn svldnt1_u64(pg: svbool_t, base: *const u64) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -22313,7 +22313,7 @@ pub unsafe fn svldnt1_vnum_f32(pg: svbool_t, base: *const f32, vnum: i64) -> svf #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -22326,7 +22326,7 @@ pub unsafe fn svldnt1_vnum_f64(pg: svbool_t, base: *const f64, vnum: i64) -> svf #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -22339,7 +22339,7 @@ pub unsafe fn svldnt1_vnum_s8(pg: svbool_t, base: *const i8, vnum: i64) -> svint #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -22352,7 +22352,7 @@ pub unsafe fn svldnt1_vnum_s16(pg: svbool_t, base: *const i16, vnum: i64) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -22365,7 +22365,7 @@ pub unsafe fn svldnt1_vnum_s32(pg: svbool_t, base: *const i32, vnum: i64) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -22378,7 +22378,7 @@ pub unsafe fn svldnt1_vnum_s64(pg: svbool_t, base: *const i64, vnum: i64) -> svi #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -22391,7 +22391,7 @@ pub unsafe fn svldnt1_vnum_u8(pg: svbool_t, base: *const u8, vnum: i64) -> svuin #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -22404,7 +22404,7 @@ pub unsafe fn svldnt1_vnum_u16(pg: svbool_t, base: *const u16, vnum: i64) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -22417,7 +22417,7 @@ pub unsafe fn svldnt1_vnum_u32(pg: svbool_t, base: *const u32, vnum: i64) -> svu #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -22426,7 +22426,7 @@ pub unsafe fn svldnt1_vnum_u64(pg: svbool_t, base: *const u64, vnum: i64) -> svu } #[doc = "Count the number of elements in a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntw))] @@ -22435,7 +22435,7 @@ pub fn svlen_f32(_op: svfloat32_t) -> u64 { } #[doc = "Count the number of elements in a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntd))] @@ -22444,7 +22444,7 @@ pub fn svlen_f64(_op: svfloat64_t) -> u64 { } #[doc = "Count the number of elements in a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rdvl))] @@ -22453,7 +22453,7 @@ pub fn svlen_s8(_op: svint8_t) -> u64 { } #[doc = "Count the number of elements in 
a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnth))] @@ -22462,7 +22462,7 @@ pub fn svlen_s16(_op: svint16_t) -> u64 { } #[doc = "Count the number of elements in a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntw))] @@ -22471,7 +22471,7 @@ pub fn svlen_s32(_op: svint32_t) -> u64 { } #[doc = "Count the number of elements in a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntd))] @@ -22480,7 +22480,7 @@ pub fn svlen_s64(_op: svint64_t) -> u64 { } #[doc = "Count the number of elements in a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rdvl))] @@ -22489,7 +22489,7 @@ pub fn svlen_u8(_op: svuint8_t) -> u64 { } #[doc = "Count the number of elements in a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cnth))] @@ -22498,7 +22498,7 @@ pub fn svlen_u16(_op: svuint16_t) -> u64 { } #[doc = "Count the 
number of elements in a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntw))] @@ -22507,7 +22507,7 @@ pub fn svlen_u32(_op: svuint32_t) -> u64 { } #[doc = "Count the number of elements in a full vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlen[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cntd))] @@ -22516,7 +22516,7 @@ pub fn svlen_u64(_op: svuint64_t) -> u64 { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22529,7 +22529,7 @@ pub fn svlsl_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22538,7 +22538,7 @@ pub fn svlsl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22547,7 +22547,7 @@ pub fn svlsl_s8_x(pg: svbool_t, op1: svint8_t, op2: 
svuint8_t) -> svint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22556,7 +22556,7 @@ pub fn svlsl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22565,7 +22565,7 @@ pub fn svlsl_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22574,7 +22574,7 @@ pub fn svlsl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22587,7 +22587,7 @@ pub fn svlsl_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22596,7 
+22596,7 @@ pub fn svlsl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22605,7 +22605,7 @@ pub fn svlsl_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22614,7 +22614,7 @@ pub fn svlsl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22623,7 +22623,7 @@ pub fn svlsl_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22632,7 +22632,7 @@ pub fn svlsl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22645,7 +22645,7 @@ pub fn svlsl_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22654,7 +22654,7 @@ pub fn svlsl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22663,7 +22663,7 @@ pub fn svlsl_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22672,7 +22672,7 @@ pub fn svlsl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22681,7 +22681,7 @@ pub fn svlsl_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s32]_z)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22690,7 +22690,7 @@ pub fn svlsl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22703,7 +22703,7 @@ pub fn svlsl_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22712,7 +22712,7 @@ pub fn svlsl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22721,7 +22721,7 @@ pub fn svlsl_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22730,7 +22730,7 @@ pub fn svlsl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { } #[doc = "Logical shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22739,7 +22739,7 @@ pub fn svlsl_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22748,7 +22748,7 @@ pub fn svlsl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22757,7 +22757,7 @@ pub fn svlsl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22766,7 +22766,7 @@ pub fn svlsl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22775,7 +22775,7 @@ pub fn svlsl_u8_x(pg: svbool_t, op1: svuint8_t, op2: 
svuint8_t) -> svuint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22784,7 +22784,7 @@ pub fn svlsl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22793,7 +22793,7 @@ pub fn svlsl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22802,7 +22802,7 @@ pub fn svlsl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22811,7 +22811,7 @@ pub fn svlsl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ 
-22820,7 +22820,7 @@ pub fn svlsl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22829,7 +22829,7 @@ pub fn svlsl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22838,7 +22838,7 @@ pub fn svlsl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22847,7 +22847,7 @@ pub fn svlsl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22856,7 +22856,7 @@ pub fn svlsl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22865,7 +22865,7 @@ pub fn svlsl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22874,7 +22874,7 @@ pub fn svlsl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22883,7 +22883,7 @@ pub fn svlsl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22892,7 +22892,7 @@ pub fn svlsl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22901,7 +22901,7 @@ pub fn svlsl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u32]_z)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22910,7 +22910,7 @@ pub fn svlsl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22919,7 +22919,7 @@ pub fn svlsl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22928,7 +22928,7 @@ pub fn svlsl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22937,7 +22937,7 @@ pub fn svlsl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22946,7 +22946,7 @@ pub fn svlsl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Logical shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22955,7 +22955,7 @@ pub fn svlsl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22964,7 +22964,7 @@ pub fn svlsl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22980,7 +22980,7 @@ pub fn svlsl_wide_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22989,7 +22989,7 @@ pub fn svlsl_wide_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -22998,7 +22998,7 @@ pub fn svlsl_wide_s8_x(pg: 
svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23007,7 +23007,7 @@ pub fn svlsl_wide_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23016,7 +23016,7 @@ pub fn svlsl_wide_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint64_t) -> svint8_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23025,7 +23025,7 @@ pub fn svlsl_wide_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u64) -> svint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23041,7 +23041,7 @@ pub fn svlsl_wide_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint1 } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23050,7 +23050,7 @@ pub fn svlsl_wide_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23059,7 +23059,7 @@ pub fn svlsl_wide_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint1 } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23068,7 +23068,7 @@ pub fn svlsl_wide_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23077,7 +23077,7 @@ pub fn svlsl_wide_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint64_t) -> svint1 } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23086,7 +23086,7 @@ pub fn svlsl_wide_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u64) -> svint16_t { } #[doc = "Logical shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23102,7 +23102,7 @@ pub fn svlsl_wide_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint3 } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23111,7 +23111,7 @@ pub fn svlsl_wide_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23120,7 +23120,7 @@ pub fn svlsl_wide_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint3 } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23129,7 +23129,7 @@ pub fn svlsl_wide_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23138,7 +23138,7 @@ pub fn 
svlsl_wide_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint64_t) -> svint3 } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23147,7 +23147,7 @@ pub fn svlsl_wide_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u64) -> svint32_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23156,7 +23156,7 @@ pub fn svlsl_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8 } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23165,7 +23165,7 @@ pub fn svlsl_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23174,7 +23174,7 @@ pub fn svlsl_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8 } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23183,7 +23183,7 @@ pub fn svlsl_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23192,7 +23192,7 @@ pub fn svlsl_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8 } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23201,7 +23201,7 @@ pub fn svlsl_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23210,7 +23210,7 @@ pub fn svlsl_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23219,7 +23219,7 @@ pub fn svlsl_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t } #[doc = "Logical shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23228,7 +23228,7 @@ pub fn svlsl_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23237,7 +23237,7 @@ pub fn svlsl_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23246,7 +23246,7 @@ pub fn svlsl_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23255,7 +23255,7 @@ pub fn svlsl_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23264,7 +23264,7 @@ pub fn 
svlsl_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23273,7 +23273,7 @@ pub fn svlsl_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23282,7 +23282,7 @@ pub fn svlsl_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23291,7 +23291,7 @@ pub fn svlsl_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23300,7 +23300,7 @@ pub fn svlsl_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsl_wide[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsl))] @@ -23309,7 +23309,7 @@ pub fn svlsl_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23322,7 +23322,7 @@ pub fn svlsr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23331,7 +23331,7 @@ pub fn svlsr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23340,7 +23340,7 @@ pub fn svlsr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23349,7 +23349,7 @@ pub fn svlsr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Logical shift right"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23358,7 +23358,7 @@ pub fn svlsr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23367,7 +23367,7 @@ pub fn svlsr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23380,7 +23380,7 @@ pub fn svlsr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23389,7 +23389,7 @@ pub fn svlsr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23398,7 +23398,7 @@ pub fn svlsr_u16_x(pg: svbool_t, op1: svuint16_t, 
op2: svuint16_t) -> svuint16_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23407,7 +23407,7 @@ pub fn svlsr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23416,7 +23416,7 @@ pub fn svlsr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23425,7 +23425,7 @@ pub fn svlsr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23438,7 +23438,7 @@ pub fn svlsr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(lsr))] @@ -23447,7 +23447,7 @@ pub fn svlsr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23456,7 +23456,7 @@ pub fn svlsr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23465,7 +23465,7 @@ pub fn svlsr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23474,7 +23474,7 @@ pub fn svlsr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23483,7 +23483,7 @@ pub fn svlsr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23496,7 +23496,7 @@ pub fn svlsr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23505,7 +23505,7 @@ pub fn svlsr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23514,7 +23514,7 @@ pub fn svlsr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23523,7 +23523,7 @@ pub fn svlsr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23532,7 +23532,7 @@ pub fn svlsr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Logical shift right"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23541,7 +23541,7 @@ pub fn svlsr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23557,7 +23557,7 @@ pub fn svlsr_wide_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8 } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23566,7 +23566,7 @@ pub fn svlsr_wide_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23575,7 +23575,7 @@ pub fn svlsr_wide_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8 } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23584,7 +23584,7 @@ pub fn 
svlsr_wide_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23593,7 +23593,7 @@ pub fn svlsr_wide_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint64_t) -> svuint8 } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23602,7 +23602,7 @@ pub fn svlsr_wide_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u64) -> svuint8_t { } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23618,7 +23618,7 @@ pub fn svlsr_wide_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23627,7 +23627,7 @@ pub fn svlsr_wide_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23636,7 +23636,7 @@ pub fn svlsr_wide_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23645,7 +23645,7 @@ pub fn svlsr_wide_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23654,7 +23654,7 @@ pub fn svlsr_wide_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23663,7 +23663,7 @@ pub fn svlsr_wide_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u64) -> svuint16_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23679,7 +23679,7 @@ pub fn svlsr_wide_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift right"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23688,7 +23688,7 @@ pub fn svlsr_wide_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23697,7 +23697,7 @@ pub fn svlsr_wide_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23706,7 +23706,7 @@ pub fn svlsr_wide_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23715,7 +23715,7 @@ pub fn svlsr_wide_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint64_t) -> svuin } #[doc = "Logical shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlsr_wide[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(lsr))] @@ -23724,7 +23724,7 @@ pub fn 
svlsr_wide_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u64) -> svuint32_t } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23747,7 +23747,7 @@ pub fn svmad_f32_m( } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23756,7 +23756,7 @@ pub fn svmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23770,7 +23770,7 @@ pub fn svmad_f32_x( } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23779,7 +23779,7 @@ pub fn svmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(fmad))] @@ -23793,7 +23793,7 @@ pub fn svmad_f32_z( } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23802,7 +23802,7 @@ pub fn svmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23825,7 +23825,7 @@ pub fn svmad_f64_m( } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23834,7 +23834,7 @@ pub fn svmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23848,7 +23848,7 @@ pub fn svmad_f64_x( } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(fmad))] @@ -23857,7 +23857,7 @@ pub fn svmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23871,7 +23871,7 @@ pub fn svmad_f64_z( } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmad))] @@ -23880,7 +23880,7 @@ pub fn svmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23893,7 +23893,7 @@ pub fn svmad_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23902,7 +23902,7 @@ pub fn svmad_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_x)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23911,7 +23911,7 @@ pub fn svmad_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23920,7 +23920,7 @@ pub fn svmad_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23929,7 +23929,7 @@ pub fn svmad_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23938,7 +23938,7 @@ pub fn svmad_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23952,7 +23952,7 @@ pub fn svmad_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-add, multiplicand 
first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23961,7 +23961,7 @@ pub fn svmad_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23970,7 +23970,7 @@ pub fn svmad_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23979,7 +23979,7 @@ pub fn svmad_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -23988,7 +23988,7 @@ pub fn svmad_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(mad))] @@ -23997,7 +23997,7 @@ pub fn svmad_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24011,7 +24011,7 @@ pub fn svmad_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24020,7 +24020,7 @@ pub fn svmad_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24029,7 +24029,7 @@ pub fn svmad_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24038,7 +24038,7 @@ pub fn svmad_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24047,7 +24047,7 @@ pub fn svmad_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24056,7 +24056,7 @@ pub fn svmad_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24070,7 +24070,7 @@ pub fn svmad_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24079,7 +24079,7 @@ pub fn svmad_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ 
-24088,7 +24088,7 @@ pub fn svmad_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24097,7 +24097,7 @@ pub fn svmad_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24106,7 +24106,7 @@ pub fn svmad_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24115,7 +24115,7 @@ pub fn svmad_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24124,7 +24124,7 @@ pub fn svmad_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_m)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24133,7 +24133,7 @@ pub fn svmad_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24142,7 +24142,7 @@ pub fn svmad_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24151,7 +24151,7 @@ pub fn svmad_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24160,7 +24160,7 @@ pub fn svmad_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24169,7 +24169,7 @@ pub fn svmad_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc 
= "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24178,7 +24178,7 @@ pub fn svmad_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24187,7 +24187,7 @@ pub fn svmad_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24196,7 +24196,7 @@ pub fn svmad_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24205,7 +24205,7 @@ pub fn svmad_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24214,7 +24214,7 @@ pub fn svmad_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24223,7 +24223,7 @@ pub fn svmad_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24232,7 +24232,7 @@ pub fn svmad_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24241,7 +24241,7 @@ pub fn svmad_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24250,7 +24250,7 @@ pub fn svmad_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24259,7 +24259,7 @@ pub fn svmad_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24268,7 +24268,7 @@ pub fn svmad_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24277,7 +24277,7 @@ pub fn svmad_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24286,7 +24286,7 @@ pub fn svmad_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ 
-24295,7 +24295,7 @@ pub fn svmad_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24304,7 +24304,7 @@ pub fn svmad_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24313,7 +24313,7 @@ pub fn svmad_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24322,7 +24322,7 @@ pub fn svmad_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmad[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mad))] @@ -24331,7 +24331,7 @@ pub fn svmad_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24344,7 +24344,7 @@ pub fn svmax_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24353,7 +24353,7 @@ pub fn svmax_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24362,7 +24362,7 @@ pub fn svmax_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24371,7 +24371,7 @@ pub fn svmax_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24380,7 +24380,7 @@ pub fn svmax_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Maximum"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24389,7 +24389,7 @@ pub fn svmax_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24402,7 +24402,7 @@ pub fn svmax_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24411,7 +24411,7 @@ pub fn svmax_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24420,7 +24420,7 @@ pub fn svmax_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24429,7 +24429,7 @@ pub fn svmax_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t 
{ } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24438,7 +24438,7 @@ pub fn svmax_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmax))] @@ -24447,7 +24447,7 @@ pub fn svmax_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24460,7 +24460,7 @@ pub fn svmax_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24469,7 +24469,7 @@ pub fn svmax_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24478,7 +24478,7 @@ pub fn svmax_s8_x(pg: svbool_t, op1: svint8_t, op2: 
svint8_t) -> svint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24487,7 +24487,7 @@ pub fn svmax_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24496,7 +24496,7 @@ pub fn svmax_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24505,7 +24505,7 @@ pub fn svmax_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24518,7 +24518,7 @@ pub fn svmax_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24527,7 +24527,7 @@ pub fn svmax_n_s16_m(pg: svbool_t, op1: 
svint16_t, op2: i16) -> svint16_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24536,7 +24536,7 @@ pub fn svmax_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24545,7 +24545,7 @@ pub fn svmax_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24554,7 +24554,7 @@ pub fn svmax_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24563,7 +24563,7 @@ pub fn svmax_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24576,7 +24576,7 @@ pub fn 
svmax_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24585,7 +24585,7 @@ pub fn svmax_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24594,7 +24594,7 @@ pub fn svmax_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24603,7 +24603,7 @@ pub fn svmax_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24612,7 +24612,7 @@ pub fn svmax_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ 
-24621,7 +24621,7 @@ pub fn svmax_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24634,7 +24634,7 @@ pub fn svmax_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24643,7 +24643,7 @@ pub fn svmax_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24652,7 +24652,7 @@ pub fn svmax_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24661,7 +24661,7 @@ pub fn svmax_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(smax))] @@ -24670,7 +24670,7 @@ pub fn svmax_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smax))] @@ -24679,7 +24679,7 @@ pub fn svmax_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24692,7 +24692,7 @@ pub fn svmax_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24701,7 +24701,7 @@ pub fn svmax_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24710,7 +24710,7 @@ pub fn svmax_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24719,7 +24719,7 @@ pub fn svmax_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24728,7 +24728,7 @@ pub fn svmax_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24737,7 +24737,7 @@ pub fn svmax_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24750,7 +24750,7 @@ pub fn svmax_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24759,7 +24759,7 @@ pub fn svmax_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24768,7 +24768,7 @@ pub fn svmax_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24777,7 +24777,7 @@ pub fn svmax_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24786,7 +24786,7 @@ pub fn svmax_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24795,7 +24795,7 @@ pub fn svmax_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24808,7 +24808,7 @@ pub fn svmax_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24817,7 +24817,7 @@ pub fn svmax_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24826,7 +24826,7 @@ pub fn svmax_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24835,7 +24835,7 @@ pub fn svmax_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24844,7 +24844,7 @@ pub fn svmax_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24853,7 +24853,7 @@ pub fn svmax_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_m)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24866,7 +24866,7 @@ pub fn svmax_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24875,7 +24875,7 @@ pub fn svmax_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24884,7 +24884,7 @@ pub fn svmax_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24893,7 +24893,7 @@ pub fn svmax_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Maximum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24902,7 +24902,7 @@ pub fn svmax_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Maximum"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmax[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umax))] @@ -24911,7 +24911,7 @@ pub fn svmax_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -24924,7 +24924,7 @@ pub fn svmaxnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloa } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -24933,7 +24933,7 @@ pub fn svmaxnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -24942,7 +24942,7 @@ pub fn svmaxnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloa } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -24951,7 +24951,7 @@ pub fn svmaxnm_n_f32_x(pg: svbool_t, 
op1: svfloat32_t, op2: f32) -> svfloat32_t } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -24960,7 +24960,7 @@ pub fn svmaxnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloa } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -24969,7 +24969,7 @@ pub fn svmaxnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -24982,7 +24982,7 @@ pub fn svmaxnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloa } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -24991,7 +24991,7 @@ pub fn svmaxnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(fmaxnm))] @@ -25000,7 +25000,7 @@ pub fn svmaxnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloa } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -25009,7 +25009,7 @@ pub fn svmaxnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -25018,7 +25018,7 @@ pub fn svmaxnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloa } #[doc = "Maximum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnm[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnm))] @@ -25027,7 +25027,7 @@ pub fn svmaxnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t } #[doc = "Maximum number reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnmv))] @@ -25043,7 +25043,7 @@ pub fn svmaxnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { } #[doc = "Maximum number reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmv[_f64])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnmv))] @@ -25059,7 +25059,7 @@ pub fn svmaxnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { } #[doc = "Maximum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxv))] @@ -25072,7 +25072,7 @@ pub fn svmaxv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { } #[doc = "Maximum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxv))] @@ -25085,7 +25085,7 @@ pub fn svmaxv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { } #[doc = "Maximum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxv))] @@ -25098,7 +25098,7 @@ pub fn svmaxv_s8(pg: svbool_t, op: svint8_t) -> i8 { } #[doc = "Maximum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxv))] @@ -25111,7 +25111,7 @@ pub fn svmaxv_s16(pg: svbool_t, op: svint16_t) -> i16 { } #[doc = "Maximum reduction to scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxv))] @@ -25124,7 +25124,7 @@ pub fn svmaxv_s32(pg: svbool_t, op: svint32_t) -> i32 { } #[doc = "Maximum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxv))] @@ -25137,7 +25137,7 @@ pub fn svmaxv_s64(pg: svbool_t, op: svint64_t) -> i64 { } #[doc = "Maximum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxv))] @@ -25150,7 +25150,7 @@ pub fn svmaxv_u8(pg: svbool_t, op: svuint8_t) -> u8 { } #[doc = "Maximum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxv))] @@ -25163,7 +25163,7 @@ pub fn svmaxv_u16(pg: svbool_t, op: svuint16_t) -> u16 { } #[doc = "Maximum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxv))] @@ -25176,7 +25176,7 @@ pub fn svmaxv_u32(pg: svbool_t, op: svuint32_t) -> u32 { } #[doc = "Maximum reduction to scalar"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxv[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxv))] @@ -25189,7 +25189,7 @@ pub fn svmaxv_u64(pg: svbool_t, op: svuint64_t) -> u64 { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25202,7 +25202,7 @@ pub fn svmin_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25211,7 +25211,7 @@ pub fn svmin_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25220,7 +25220,7 @@ pub fn svmin_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25229,7 +25229,7 @@ pub fn svmin_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = 
"Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25238,7 +25238,7 @@ pub fn svmin_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25247,7 +25247,7 @@ pub fn svmin_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25260,7 +25260,7 @@ pub fn svmin_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25269,7 +25269,7 @@ pub fn svmin_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25278,7 +25278,7 @@ pub fn svmin_f64_x(pg: svbool_t, op1: svfloat64_t, 
op2: svfloat64_t) -> svfloat6 } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25287,7 +25287,7 @@ pub fn svmin_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25296,7 +25296,7 @@ pub fn svmin_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmin))] @@ -25305,7 +25305,7 @@ pub fn svmin_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25318,7 +25318,7 @@ pub fn svmin_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25327,7 +25327,7 @@ pub fn 
svmin_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25336,7 +25336,7 @@ pub fn svmin_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25345,7 +25345,7 @@ pub fn svmin_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25354,7 +25354,7 @@ pub fn svmin_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25363,7 +25363,7 @@ pub fn svmin_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25376,7 +25376,7 @@ pub fn 
svmin_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25385,7 +25385,7 @@ pub fn svmin_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25394,7 +25394,7 @@ pub fn svmin_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25403,7 +25403,7 @@ pub fn svmin_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25412,7 +25412,7 @@ pub fn svmin_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ 
-25421,7 +25421,7 @@ pub fn svmin_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25434,7 +25434,7 @@ pub fn svmin_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25443,7 +25443,7 @@ pub fn svmin_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25452,7 +25452,7 @@ pub fn svmin_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25461,7 +25461,7 @@ pub fn svmin_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(smin))] @@ -25470,7 +25470,7 @@ pub fn svmin_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25479,7 +25479,7 @@ pub fn svmin_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25492,7 +25492,7 @@ pub fn svmin_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25501,7 +25501,7 @@ pub fn svmin_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25510,7 +25510,7 @@ pub fn svmin_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25519,7 +25519,7 @@ pub fn svmin_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25528,7 +25528,7 @@ pub fn svmin_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smin))] @@ -25537,7 +25537,7 @@ pub fn svmin_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25550,7 +25550,7 @@ pub fn svmin_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25559,7 +25559,7 @@ pub fn svmin_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25568,7 +25568,7 @@ pub fn svmin_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25577,7 +25577,7 @@ pub fn svmin_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25586,7 +25586,7 @@ pub fn svmin_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25595,7 +25595,7 @@ pub fn svmin_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25608,7 +25608,7 @@ pub fn svmin_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25617,7 +25617,7 @@ pub fn svmin_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25626,7 +25626,7 @@ pub fn svmin_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25635,7 +25635,7 @@ pub fn svmin_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25644,7 +25644,7 @@ pub fn svmin_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25653,7 +25653,7 @@ pub fn svmin_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25666,7 +25666,7 @@ pub fn svmin_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25675,7 +25675,7 @@ pub fn svmin_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25684,7 +25684,7 @@ pub fn svmin_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25693,7 +25693,7 @@ pub fn svmin_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25702,7 +25702,7 @@ pub fn svmin_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Minimum"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25711,7 +25711,7 @@ pub fn svmin_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25724,7 +25724,7 @@ pub fn svmin_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25733,7 +25733,7 @@ pub fn svmin_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25742,7 +25742,7 @@ pub fn svmin_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25751,7 +25751,7 @@ pub fn svmin_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } 
#[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25760,7 +25760,7 @@ pub fn svmin_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Minimum"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmin[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umin))] @@ -25769,7 +25769,7 @@ pub fn svmin_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25782,7 +25782,7 @@ pub fn svminnm_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloa } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25791,7 +25791,7 @@ pub fn svminnm_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25800,7 +25800,7 @@ pub fn 
svminnm_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloa } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25809,7 +25809,7 @@ pub fn svminnm_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25818,7 +25818,7 @@ pub fn svminnm_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloa } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25827,7 +25827,7 @@ pub fn svminnm_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25840,7 +25840,7 @@ pub fn svminnm_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloa } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25849,7 +25849,7 @@ pub fn svminnm_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25858,7 +25858,7 @@ pub fn svminnm_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloa } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25867,7 +25867,7 @@ pub fn svminnm_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25876,7 +25876,7 @@ pub fn svminnm_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloa } #[doc = "Minimum number"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnm[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnm))] @@ -25885,7 +25885,7 @@ pub fn svminnm_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t } #[doc = "Minimum number reduction to scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnmv))] @@ -25901,7 +25901,7 @@ pub fn svminnmv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { } #[doc = "Minimum number reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmv[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnmv))] @@ -25917,7 +25917,7 @@ pub fn svminnmv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { } #[doc = "Minimum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminv))] @@ -25930,7 +25930,7 @@ pub fn svminv_f32(pg: svbool_t, op: svfloat32_t) -> f32 { } #[doc = "Minimum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminv))] @@ -25943,7 +25943,7 @@ pub fn svminv_f64(pg: svbool_t, op: svfloat64_t) -> f64 { } #[doc = "Minimum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminv))] @@ -25956,7 +25956,7 @@ pub fn svminv_s8(pg: svbool_t, op: svint8_t) -> i8 { } #[doc = "Minimum 
reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminv))] @@ -25969,7 +25969,7 @@ pub fn svminv_s16(pg: svbool_t, op: svint16_t) -> i16 { } #[doc = "Minimum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminv))] @@ -25982,7 +25982,7 @@ pub fn svminv_s32(pg: svbool_t, op: svint32_t) -> i32 { } #[doc = "Minimum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminv))] @@ -25995,7 +25995,7 @@ pub fn svminv_s64(pg: svbool_t, op: svint64_t) -> i64 { } #[doc = "Minimum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminv))] @@ -26008,7 +26008,7 @@ pub fn svminv_u8(pg: svbool_t, op: svuint8_t) -> u8 { } #[doc = "Minimum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminv))] @@ -26021,7 +26021,7 @@ pub fn svminv_u16(pg: svbool_t, op: svuint16_t) -> u16 { } #[doc = 
"Minimum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminv))] @@ -26034,7 +26034,7 @@ pub fn svminv_u32(pg: svbool_t, op: svuint32_t) -> u32 { } #[doc = "Minimum reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminv[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminv))] @@ -26047,7 +26047,7 @@ pub fn svminv_u64(pg: svbool_t, op: svuint64_t) -> u64 { } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26070,7 +26070,7 @@ pub fn svmla_f32_m( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26079,7 +26079,7 @@ pub fn svmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26093,7 +26093,7 @@ pub fn svmla_f32_x( } #[doc = "Multiply-add, addend first"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26102,7 +26102,7 @@ pub fn svmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26116,7 +26116,7 @@ pub fn svmla_f32_z( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26125,7 +26125,7 @@ pub fn svmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26148,7 +26148,7 @@ pub fn svmla_f64_m( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26157,7 +26157,7 @@ pub fn svmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-add, addend 
first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26171,7 +26171,7 @@ pub fn svmla_f64_x( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26180,7 +26180,7 @@ pub fn svmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26194,7 +26194,7 @@ pub fn svmla_f64_z( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla))] @@ -26203,7 +26203,7 @@ pub fn svmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26216,7 +26216,7 @@ pub fn svmla_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = 
"Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26225,7 +26225,7 @@ pub fn svmla_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26234,7 +26234,7 @@ pub fn svmla_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26243,7 +26243,7 @@ pub fn svmla_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26252,7 +26252,7 @@ pub fn svmla_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(mla))] @@ -26261,7 +26261,7 @@ pub fn svmla_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26275,7 +26275,7 @@ pub fn svmla_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26284,7 +26284,7 @@ pub fn svmla_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26293,7 +26293,7 @@ pub fn svmla_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26302,7 +26302,7 @@ pub fn svmla_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s16]_z)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26311,7 +26311,7 @@ pub fn svmla_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26320,7 +26320,7 @@ pub fn svmla_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26334,7 +26334,7 @@ pub fn svmla_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26343,7 +26343,7 @@ pub fn svmla_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26352,7 +26352,7 @@ pub fn svmla_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-add, 
addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26361,7 +26361,7 @@ pub fn svmla_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26370,7 +26370,7 @@ pub fn svmla_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26379,7 +26379,7 @@ pub fn svmla_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26393,7 +26393,7 @@ pub fn svmla_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] 
@@ -26402,7 +26402,7 @@ pub fn svmla_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26411,7 +26411,7 @@ pub fn svmla_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26420,7 +26420,7 @@ pub fn svmla_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26429,7 +26429,7 @@ pub fn svmla_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26438,7 +26438,7 @@ pub fn svmla_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26447,7 +26447,7 @@ pub fn svmla_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26456,7 +26456,7 @@ pub fn svmla_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26465,7 +26465,7 @@ pub fn svmla_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26474,7 +26474,7 @@ pub fn svmla_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26483,7 +26483,7 @@ pub fn svmla_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26492,7 +26492,7 @@ pub fn svmla_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26501,7 +26501,7 @@ pub fn svmla_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26510,7 +26510,7 @@ pub fn svmla_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26519,7 +26519,7 @@ pub fn svmla_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26528,7 +26528,7 @@ pub fn 
svmla_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26537,7 +26537,7 @@ pub fn svmla_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26546,7 +26546,7 @@ pub fn svmla_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26555,7 +26555,7 @@ pub fn svmla_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26564,7 +26564,7 @@ pub fn svmla_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26573,7 +26573,7 @@ pub fn svmla_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26582,7 +26582,7 @@ pub fn svmla_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26591,7 +26591,7 @@ pub fn svmla_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26600,7 +26600,7 @@ pub fn svmla_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26609,7 +26609,7 @@ pub fn svmla_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26618,7 +26618,7 @@ pub fn svmla_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26627,7 +26627,7 @@ pub fn svmla_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26636,7 +26636,7 @@ pub fn svmla_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26645,7 +26645,7 @@ pub fn svmla_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla))] @@ -26654,7 +26654,7 @@ pub 
fn svmla_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))] @@ -26680,7 +26680,7 @@ pub fn svmla_lane_f32( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmla, IMM_INDEX = 0))] @@ -26706,7 +26706,7 @@ pub fn svmla_lane_f64( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26729,7 +26729,7 @@ pub fn svmls_f32_m( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26738,7 +26738,7 @@ pub fn svmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26752,7 
+26752,7 @@ pub fn svmls_f32_x( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26761,7 +26761,7 @@ pub fn svmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26775,7 +26775,7 @@ pub fn svmls_f32_z( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26784,7 +26784,7 @@ pub fn svmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26807,7 +26807,7 @@ pub fn svmls_f64_m( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26816,7 +26816,7 @@ pub 
fn svmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26830,7 +26830,7 @@ pub fn svmls_f64_x( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26839,7 +26839,7 @@ pub fn svmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26853,7 +26853,7 @@ pub fn svmls_f64_z( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls))] @@ -26862,7 +26862,7 @@ pub fn svmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(mls))] @@ -26875,7 +26875,7 @@ pub fn svmls_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26884,7 +26884,7 @@ pub fn svmls_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26893,7 +26893,7 @@ pub fn svmls_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26902,7 +26902,7 @@ pub fn svmls_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26911,7 +26911,7 @@ pub fn svmls_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26920,7 +26920,7 @@ pub fn svmls_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26934,7 +26934,7 @@ pub fn svmls_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26943,7 +26943,7 @@ pub fn svmls_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26952,7 +26952,7 @@ pub fn svmls_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ 
-26961,7 +26961,7 @@ pub fn svmls_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26970,7 +26970,7 @@ pub fn svmls_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26979,7 +26979,7 @@ pub fn svmls_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -26993,7 +26993,7 @@ pub fn svmls_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27002,7 +27002,7 @@ pub fn svmls_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_x)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27011,7 +27011,7 @@ pub fn svmls_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27020,7 +27020,7 @@ pub fn svmls_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27029,7 +27029,7 @@ pub fn svmls_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27038,7 +27038,7 @@ pub fn svmls_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27052,7 +27052,7 @@ pub fn svmls_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } 
#[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27061,7 +27061,7 @@ pub fn svmls_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27070,7 +27070,7 @@ pub fn svmls_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27079,7 +27079,7 @@ pub fn svmls_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27088,7 +27088,7 @@ pub fn svmls_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27097,7 +27097,7 @@ pub fn svmls_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27106,7 +27106,7 @@ pub fn svmls_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27115,7 +27115,7 @@ pub fn svmls_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27124,7 +27124,7 @@ pub fn svmls_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27133,7 +27133,7 @@ pub fn svmls_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27142,7 +27142,7 @@ pub fn svmls_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27151,7 +27151,7 @@ pub fn svmls_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27160,7 +27160,7 @@ pub fn svmls_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27169,7 +27169,7 @@ pub fn svmls_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ 
-27178,7 +27178,7 @@ pub fn svmls_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27187,7 +27187,7 @@ pub fn svmls_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27196,7 +27196,7 @@ pub fn svmls_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27205,7 +27205,7 @@ pub fn svmls_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27214,7 +27214,7 @@ pub fn svmls_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_m)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27223,7 +27223,7 @@ pub fn svmls_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27232,7 +27232,7 @@ pub fn svmls_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27241,7 +27241,7 @@ pub fn svmls_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27250,7 +27250,7 @@ pub fn svmls_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27259,7 +27259,7 @@ pub fn svmls_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } 
#[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27268,7 +27268,7 @@ pub fn svmls_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27277,7 +27277,7 @@ pub fn svmls_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27286,7 +27286,7 @@ pub fn svmls_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27295,7 +27295,7 @@ pub fn svmls_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27304,7 +27304,7 @@ pub fn svmls_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls))] @@ -27313,7 +27313,7 @@ pub fn svmls_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))] @@ -27339,7 +27339,7 @@ pub fn svmls_lane_f32( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmls, IMM_INDEX = 0))] @@ -27365,7 +27365,7 @@ pub fn svmls_lane_f64( } #[doc = "Matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f32mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmmla))] @@ -27378,7 +27378,7 @@ pub fn svmmla_f32(op1: svfloat32_t, op2: svfloat32_t, op3: svfloat32_t) -> svflo } #[doc = "Matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_f64])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmmla))] @@ -27391,7 +27391,7 @@ pub fn svmmla_f64(op1: svfloat64_t, op2: svfloat64_t, op3: svfloat64_t) -> svflo } #[doc = "Matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,i8mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smmla))] @@ -27404,7 +27404,7 @@ pub fn svmmla_s32(op1: svint32_t, op2: svint8_t, op3: svint8_t) -> svint32_t { } #[doc = "Matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmmla[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,i8mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ummla))] @@ -27417,7 +27417,7 @@ pub fn svmmla_u32(op1: svuint32_t, op2: svuint8_t, op3: svuint8_t) -> svuint32_t } #[doc = "Move"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmov[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mov))] @@ -27426,7 +27426,7 @@ pub fn svmov_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27449,7 +27449,7 @@ pub fn svmsb_f32_m( } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27458,7 +27458,7 @@ pub fn svmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27472,7 +27472,7 @@ pub fn svmsb_f32_x( } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27481,7 +27481,7 @@ pub fn svmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27495,7 +27495,7 @@ pub fn svmsb_f32_z( } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27504,7 +27504,7 @@ pub fn svmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: 
f32) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27527,7 +27527,7 @@ pub fn svmsb_f64_m( } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27536,7 +27536,7 @@ pub fn svmsb_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27550,7 +27550,7 @@ pub fn svmsb_f64_x( } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27559,7 +27559,7 @@ pub fn svmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27573,7 +27573,7 @@ pub fn 
svmsb_f64_z( } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmsb))] @@ -27582,7 +27582,7 @@ pub fn svmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27595,7 +27595,7 @@ pub fn svmsb_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27604,7 +27604,7 @@ pub fn svmsb_n_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27613,7 +27613,7 @@ pub fn svmsb_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27622,7 +27622,7 @@ pub fn svmsb_n_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27631,7 +27631,7 @@ pub fn svmsb_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: svint8_t) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27640,7 +27640,7 @@ pub fn svmsb_n_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t, op3: i8) -> svin } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27654,7 +27654,7 @@ pub fn svmsb_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27663,7 +27663,7 @@ pub fn svmsb_n_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-subtract, multiplicand first"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27672,7 +27672,7 @@ pub fn svmsb_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27681,7 +27681,7 @@ pub fn svmsb_n_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27690,7 +27690,7 @@ pub fn svmsb_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: svint16_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27699,7 +27699,7 @@ pub fn svmsb_n_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t, op3: i16) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(msb))] @@ -27713,7 +27713,7 @@ pub fn svmsb_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27722,7 +27722,7 @@ pub fn svmsb_n_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27731,7 +27731,7 @@ pub fn svmsb_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27740,7 +27740,7 @@ pub fn svmsb_n_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27749,7 +27749,7 @@ pub fn svmsb_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: svint32_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27758,7 +27758,7 @@ pub fn svmsb_n_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t, op3: i32) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27772,7 +27772,7 @@ pub fn svmsb_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27781,7 +27781,7 @@ pub fn svmsb_n_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27790,7 +27790,7 @@ pub fn svmsb_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(msb))] @@ -27799,7 +27799,7 @@ pub fn svmsb_n_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27808,7 +27808,7 @@ pub fn svmsb_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: svint64_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27817,7 +27817,7 @@ pub fn svmsb_n_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t, op3: i64) -> } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27826,7 +27826,7 @@ pub fn svmsb_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27835,7 +27835,7 @@ pub fn svmsb_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27844,7 +27844,7 @@ pub fn svmsb_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27853,7 +27853,7 @@ pub fn svmsb_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27862,7 +27862,7 @@ pub fn svmsb_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27871,7 +27871,7 @@ pub fn svmsb_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t, op3: u8) -> sv } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(msb))] @@ -27880,7 +27880,7 @@ pub fn svmsb_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27889,7 +27889,7 @@ pub fn svmsb_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27898,7 +27898,7 @@ pub fn svmsb_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27907,7 +27907,7 @@ pub fn svmsb_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27916,7 +27916,7 @@ pub fn svmsb_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: svuint16 } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27925,7 +27925,7 @@ pub fn svmsb_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t, op3: u16) - } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27934,7 +27934,7 @@ pub fn svmsb_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27943,7 +27943,7 @@ pub fn svmsb_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27952,7 +27952,7 @@ pub fn svmsb_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(msb))] @@ -27961,7 +27961,7 @@ pub fn svmsb_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27970,7 +27970,7 @@ pub fn svmsb_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: svuint32 } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27979,7 +27979,7 @@ pub fn svmsb_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t, op3: u32) - } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27988,7 +27988,7 @@ pub fn svmsb_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -27997,7 +27997,7 @@ pub fn svmsb_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -28006,7 +28006,7 @@ pub fn svmsb_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -28015,7 +28015,7 @@ pub fn svmsb_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -28024,7 +28024,7 @@ pub fn svmsb_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: svuint64 } #[doc = "Multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmsb[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(msb))] @@ -28033,7 +28033,7 @@ pub fn svmsb_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t, op3: u64) - } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28046,7 
+28046,7 @@ pub fn svmul_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28055,7 +28055,7 @@ pub fn svmul_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28064,7 +28064,7 @@ pub fn svmul_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28073,7 +28073,7 @@ pub fn svmul_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28082,7 +28082,7 @@ pub fn svmul_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(fmul))] @@ -28091,7 +28091,7 @@ pub fn svmul_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28104,7 +28104,7 @@ pub fn svmul_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28113,7 +28113,7 @@ pub fn svmul_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28122,7 +28122,7 @@ pub fn svmul_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28131,7 +28131,7 @@ pub fn svmul_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28140,7 +28140,7 @@ pub fn svmul_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul))] @@ -28149,7 +28149,7 @@ pub fn svmul_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28162,7 +28162,7 @@ pub fn svmul_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28171,7 +28171,7 @@ pub fn svmul_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28180,7 +28180,7 @@ pub fn svmul_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28189,7 +28189,7 @@ pub fn svmul_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28198,7 +28198,7 @@ pub fn svmul_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28207,7 +28207,7 @@ pub fn svmul_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28220,7 +28220,7 @@ pub fn svmul_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28229,7 +28229,7 @@ pub fn svmul_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28238,7 +28238,7 @@ pub fn svmul_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28247,7 +28247,7 @@ pub fn svmul_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28256,7 +28256,7 @@ pub fn svmul_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28265,7 +28265,7 @@ pub fn svmul_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28278,7 +28278,7 @@ pub fn svmul_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_m)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28287,7 +28287,7 @@ pub fn svmul_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28296,7 +28296,7 @@ pub fn svmul_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28305,7 +28305,7 @@ pub fn svmul_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28314,7 +28314,7 @@ pub fn svmul_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28323,7 +28323,7 @@ pub fn svmul_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Multiply"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28336,7 +28336,7 @@ pub fn svmul_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28345,7 +28345,7 @@ pub fn svmul_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28354,7 +28354,7 @@ pub fn svmul_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28363,7 +28363,7 @@ pub fn svmul_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28372,7 +28372,7 @@ pub fn svmul_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = 
"Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28381,7 +28381,7 @@ pub fn svmul_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28390,7 +28390,7 @@ pub fn svmul_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28399,7 +28399,7 @@ pub fn svmul_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28408,7 +28408,7 @@ pub fn svmul_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28417,7 +28417,7 @@ pub fn svmul_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> 
svuint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28426,7 +28426,7 @@ pub fn svmul_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28435,7 +28435,7 @@ pub fn svmul_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28444,7 +28444,7 @@ pub fn svmul_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28453,7 +28453,7 @@ pub fn svmul_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28462,7 +28462,7 @@ pub fn svmul_u16_x(pg: svbool_t, op1: 
svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28471,7 +28471,7 @@ pub fn svmul_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28480,7 +28480,7 @@ pub fn svmul_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28489,7 +28489,7 @@ pub fn svmul_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28498,7 +28498,7 @@ pub fn svmul_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28507,7 +28507,7 @@ 
pub fn svmul_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28516,7 +28516,7 @@ pub fn svmul_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28525,7 +28525,7 @@ pub fn svmul_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28534,7 +28534,7 @@ pub fn svmul_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28543,7 +28543,7 @@ pub fn svmul_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(mul))] @@ -28552,7 +28552,7 @@ pub fn svmul_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28561,7 +28561,7 @@ pub fn svmul_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28570,7 +28570,7 @@ pub fn svmul_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28579,7 +28579,7 @@ pub fn svmul_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28588,7 +28588,7 @@ pub fn svmul_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(mul))] @@ -28597,7 +28597,7 @@ pub fn svmul_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28610,7 +28610,7 @@ pub fn svmulh_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28619,7 +28619,7 @@ pub fn svmulh_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28628,7 +28628,7 @@ pub fn svmulh_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28637,7 +28637,7 @@ pub fn svmulh_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28646,7 +28646,7 @@ pub fn svmulh_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28655,7 +28655,7 @@ pub fn svmulh_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28668,7 +28668,7 @@ pub fn svmulh_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28677,7 +28677,7 @@ pub fn svmulh_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28686,7 +28686,7 
@@ pub fn svmulh_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28695,7 +28695,7 @@ pub fn svmulh_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28704,7 +28704,7 @@ pub fn svmulh_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28713,7 +28713,7 @@ pub fn svmulh_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28726,7 +28726,7 @@ pub fn svmulh_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28735,7 +28735,7 @@ pub fn svmulh_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28744,7 +28744,7 @@ pub fn svmulh_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28753,7 +28753,7 @@ pub fn svmulh_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28762,7 +28762,7 @@ pub fn svmulh_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28771,7 +28771,7 @@ pub fn svmulh_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Multiply, returning high-half"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28784,7 +28784,7 @@ pub fn svmulh_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28793,7 +28793,7 @@ pub fn svmulh_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28802,7 +28802,7 @@ pub fn svmulh_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28811,7 +28811,7 @@ pub fn svmulh_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(smulh))] @@ -28820,7 +28820,7 @@ pub fn svmulh_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smulh))] @@ -28829,7 +28829,7 @@ pub fn svmulh_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28842,7 +28842,7 @@ pub fn svmulh_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28851,7 +28851,7 @@ pub fn svmulh_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28860,7 +28860,7 @@ pub fn svmulh_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28869,7 +28869,7 @@ pub fn svmulh_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28878,7 +28878,7 @@ pub fn svmulh_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28887,7 +28887,7 @@ pub fn svmulh_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28900,7 +28900,7 @@ pub fn svmulh_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28909,7 
+28909,7 @@ pub fn svmulh_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28918,7 +28918,7 @@ pub fn svmulh_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28927,7 +28927,7 @@ pub fn svmulh_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28936,7 +28936,7 @@ pub fn svmulh_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28945,7 +28945,7 @@ pub fn svmulh_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_m)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28958,7 +28958,7 @@ pub fn svmulh_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28967,7 +28967,7 @@ pub fn svmulh_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28976,7 +28976,7 @@ pub fn svmulh_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28985,7 +28985,7 @@ pub fn svmulh_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -28994,7 +28994,7 @@ pub fn svmulh_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Multiply, 
returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -29003,7 +29003,7 @@ pub fn svmulh_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -29016,7 +29016,7 @@ pub fn svmulh_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -29025,7 +29025,7 @@ pub fn svmulh_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -29034,7 +29034,7 @@ pub fn svmulh_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(umulh))] @@ -29043,7 +29043,7 @@ pub fn svmulh_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -29052,7 +29052,7 @@ pub fn svmulh_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Multiply, returning high-half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulh[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umulh))] @@ -29061,7 +29061,7 @@ pub fn svmulh_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29074,7 +29074,7 @@ pub fn svmulx_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29083,7 +29083,7 @@ pub fn svmulx_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29092,7 +29092,7 @@ pub fn svmulx_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29101,7 +29101,7 @@ pub fn svmulx_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29110,7 +29110,7 @@ pub fn svmulx_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29119,7 +29119,7 @@ pub fn svmulx_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29132,7 +29132,7 
@@ pub fn svmulx_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29141,7 +29141,7 @@ pub fn svmulx_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29150,7 +29150,7 @@ pub fn svmulx_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29159,7 +29159,7 @@ pub fn svmulx_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29168,7 +29168,7 @@ pub fn svmulx_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Multiply extended (∞×0=2)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmulx[_n_f64]_z)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmulx))] @@ -29177,7 +29177,7 @@ pub fn svmulx_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Bitwise NAND"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnand[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nand))] @@ -29190,7 +29190,7 @@ pub fn svnand_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fneg))] @@ -29203,7 +29203,7 @@ pub fn svneg_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svfl } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fneg))] @@ -29212,7 +29212,7 @@ pub fn svneg_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fneg))] @@ -29221,7 +29221,7 @@ pub fn svneg_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_m)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fneg))] @@ -29234,7 +29234,7 @@ pub fn svneg_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svfl } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fneg))] @@ -29243,7 +29243,7 @@ pub fn svneg_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fneg))] @@ -29252,7 +29252,7 @@ pub fn svneg_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29265,7 +29265,7 @@ pub fn svneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29274,7 +29274,7 @@ pub fn svneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s8]_z)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29283,7 +29283,7 @@ pub fn svneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29296,7 +29296,7 @@ pub fn svneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_ } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29305,7 +29305,7 @@ pub fn svneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29314,7 +29314,7 @@ pub fn svneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29327,7 +29327,7 @@ pub fn svneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_ } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29336,7 +29336,7 @@ pub fn svneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29345,7 +29345,7 @@ pub fn svneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29358,7 +29358,7 @@ pub fn svneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_ } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29367,7 +29367,7 @@ pub fn svneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svneg[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(neg))] @@ -29376,7 +29376,7 @@ pub fn svneg_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29399,7 +29399,7 @@ pub fn svnmad_f32_m( } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29408,7 +29408,7 @@ pub fn svnmad_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29422,7 +29422,7 @@ pub fn svnmad_f32_x( } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29431,7 +29431,7 @@ pub fn svnmad_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29445,7 +29445,7 @@ pub fn svnmad_f32_z( } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f32]_z)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29454,7 +29454,7 @@ pub fn svnmad_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29477,7 +29477,7 @@ pub fn svnmad_f64_m( } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29486,7 +29486,7 @@ pub fn svnmad_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29500,7 +29500,7 @@ pub fn svnmad_f64_x( } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29509,7 +29509,7 @@ pub fn svnmad_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29523,7 +29523,7 @@ pub fn svnmad_f64_z( } #[doc = "Negated multiply-add, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmad[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmad))] @@ -29532,7 +29532,7 @@ pub fn svnmad_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29555,7 +29555,7 @@ pub fn svnmla_f32_m( } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29564,7 +29564,7 @@ pub fn svnmla_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29578,7 +29578,7 @@ pub fn svnmla_f32_x( } #[doc = "Negated multiply-add, addend first"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29587,7 +29587,7 @@ pub fn svnmla_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29601,7 +29601,7 @@ pub fn svnmla_f32_z( } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29610,7 +29610,7 @@ pub fn svnmla_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29633,7 +29633,7 @@ pub fn svnmla_f64_m( } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29642,7 +29642,7 @@ pub fn svnmla_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: 
svfloat64_t, op3: f64 } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29656,7 +29656,7 @@ pub fn svnmla_f64_x( } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29665,7 +29665,7 @@ pub fn svnmla_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29679,7 +29679,7 @@ pub fn svnmla_f64_z( } #[doc = "Negated multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmla[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmla))] @@ -29688,7 +29688,7 @@ pub fn svnmla_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29711,7 
+29711,7 @@ pub fn svnmls_f32_m( } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29720,7 +29720,7 @@ pub fn svnmls_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29734,7 +29734,7 @@ pub fn svnmls_f32_x( } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29743,7 +29743,7 @@ pub fn svnmls_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29757,7 +29757,7 @@ pub fn svnmls_f32_z( } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(fnmls))] @@ -29766,7 +29766,7 @@ pub fn svnmls_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29789,7 +29789,7 @@ pub fn svnmls_f64_m( } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29798,7 +29798,7 @@ pub fn svnmls_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29812,7 +29812,7 @@ pub fn svnmls_f64_x( } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29821,7 +29821,7 @@ pub fn svnmls_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_f64]_z)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29835,7 +29835,7 @@ pub fn svnmls_f64_z( } #[doc = "Negated multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmls[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmls))] @@ -29844,7 +29844,7 @@ pub fn svnmls_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29867,7 +29867,7 @@ pub fn svnmsb_f32_m( } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29876,7 +29876,7 @@ pub fn svnmsb_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29890,7 +29890,7 @@ pub fn svnmsb_f32_x( } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29899,7 +29899,7 @@ pub fn svnmsb_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29913,7 +29913,7 @@ pub fn svnmsb_f32_z( } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29922,7 +29922,7 @@ pub fn svnmsb_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t, op3: f32 } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29945,7 +29945,7 @@ pub fn svnmsb_f64_m( } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29954,7 +29954,7 @@ pub fn svnmsb_n_f64_m(pg: 
svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29968,7 +29968,7 @@ pub fn svnmsb_f64_x( } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29977,7 +29977,7 @@ pub fn svnmsb_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -29991,7 +29991,7 @@ pub fn svnmsb_f64_z( } #[doc = "Negated multiply-subtract, multiplicand first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmsb[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fnmsb))] @@ -30000,7 +30000,7 @@ pub fn svnmsb_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t, op3: f64 } #[doc = "Bitwise NOR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnor[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(nor))] @@ -30013,7 +30013,7 @@ pub fn svnor_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30022,7 +30022,7 @@ pub fn svnot_b_z(pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30035,7 +30035,7 @@ pub fn svnot_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30044,7 +30044,7 @@ pub fn svnot_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30053,7 +30053,7 @@ pub fn svnot_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(not))] @@ -30066,7 +30066,7 @@ pub fn svnot_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16_ } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30075,7 +30075,7 @@ pub fn svnot_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30084,7 +30084,7 @@ pub fn svnot_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30097,7 +30097,7 @@ pub fn svnot_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32_ } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30106,7 +30106,7 @@ pub fn svnot_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(not))] @@ -30115,7 +30115,7 @@ pub fn svnot_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30128,7 +30128,7 @@ pub fn svnot_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64_ } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30137,7 +30137,7 @@ pub fn svnot_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30146,7 +30146,7 @@ pub fn svnot_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30155,7 +30155,7 @@ pub fn svnot_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_t } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30164,7 +30164,7 @@ pub fn svnot_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30173,7 +30173,7 @@ pub fn svnot_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30182,7 +30182,7 @@ pub fn svnot_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuint } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30191,7 +30191,7 @@ pub fn svnot_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30200,7 +30200,7 @@ pub fn svnot_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30209,7 +30209,7 @@ pub fn svnot_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuint } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30218,7 +30218,7 @@ pub fn svnot_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30227,7 +30227,7 @@ pub fn svnot_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30236,7 +30236,7 @@ pub fn svnot_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuint } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30245,7 +30245,7 @@ pub fn svnot_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Bitwise invert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnot[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(not))] @@ -30254,7 +30254,7 @@ pub fn svnot_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Bitwise inclusive OR, inverting second argument"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorn[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orn))] @@ -30267,7 +30267,7 @@ pub fn svorn_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_b]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30280,7 +30280,7 @@ pub fn svorr_b_z(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30293,7 +30293,7 @@ pub fn svorr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30302,7 +30302,7 @@ pub fn svorr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30311,7 +30311,7 @@ pub fn svorr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30320,7 +30320,7 @@ pub fn svorr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30329,7 +30329,7 @@ pub fn svorr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30338,7 +30338,7 @@ pub fn svorr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30351,7 +30351,7 @@ pub fn svorr_s16_m(pg: svbool_t, op1: svint16_t, op2: 
svint16_t) -> svint16_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30360,7 +30360,7 @@ pub fn svorr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30369,7 +30369,7 @@ pub fn svorr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30378,7 +30378,7 @@ pub fn svorr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30387,7 +30387,7 @@ pub fn svorr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(orr))] @@ -30396,7 +30396,7 @@ pub fn svorr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30409,7 +30409,7 @@ pub fn svorr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30418,7 +30418,7 @@ pub fn svorr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30427,7 +30427,7 @@ pub fn svorr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30436,7 +30436,7 @@ pub fn svorr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30445,7 +30445,7 @@ pub fn svorr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30454,7 +30454,7 @@ pub fn svorr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30467,7 +30467,7 @@ pub fn svorr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30476,7 +30476,7 @@ pub fn svorr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30485,7 +30485,7 @@ pub fn svorr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30494,7 +30494,7 @@ pub fn svorr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30503,7 +30503,7 @@ pub fn svorr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30512,7 +30512,7 @@ pub fn svorr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30521,7 +30521,7 @@ pub fn svorr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30530,7 +30530,7 @@ pub fn svorr_n_u8_m(pg: svbool_t, op1: 
svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30539,7 +30539,7 @@ pub fn svorr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30548,7 +30548,7 @@ pub fn svorr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30557,7 +30557,7 @@ pub fn svorr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30566,7 +30566,7 @@ pub fn svorr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(orr))] @@ -30575,7 +30575,7 @@ pub fn svorr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30584,7 +30584,7 @@ pub fn svorr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30593,7 +30593,7 @@ pub fn svorr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30602,7 +30602,7 @@ pub fn svorr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30611,7 +30611,7 @@ pub fn svorr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u16]_z)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30620,7 +30620,7 @@ pub fn svorr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30629,7 +30629,7 @@ pub fn svorr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30638,7 +30638,7 @@ pub fn svorr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30647,7 +30647,7 @@ pub fn svorr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30656,7 +30656,7 @@ pub fn svorr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30665,7 +30665,7 @@ pub fn svorr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30674,7 +30674,7 @@ pub fn svorr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30683,7 +30683,7 @@ pub fn svorr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30692,7 +30692,7 @@ pub fn svorr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30701,7 +30701,7 @@ pub fn svorr_u64_x(pg: svbool_t, op1: 
svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30710,7 +30710,7 @@ pub fn svorr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30719,7 +30719,7 @@ pub fn svorr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Bitwise inclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorr[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orr))] @@ -30728,7 +30728,7 @@ pub fn svorr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise inclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orv))] @@ -30741,7 +30741,7 @@ pub fn svorv_s8(pg: svbool_t, op: svint8_t) -> i8 { } #[doc = "Bitwise inclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(orv))] @@ -30754,7 +30754,7 @@ pub fn svorv_s16(pg: svbool_t, op: svint16_t) -> i16 { } #[doc = "Bitwise inclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orv))] @@ -30767,7 +30767,7 @@ pub fn svorv_s32(pg: svbool_t, op: svint32_t) -> i32 { } #[doc = "Bitwise inclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orv))] @@ -30780,7 +30780,7 @@ pub fn svorv_s64(pg: svbool_t, op: svint64_t) -> i64 { } #[doc = "Bitwise inclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orv))] @@ -30789,7 +30789,7 @@ pub fn svorv_u8(pg: svbool_t, op: svuint8_t) -> u8 { } #[doc = "Bitwise inclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orv))] @@ -30798,7 +30798,7 @@ pub fn svorv_u16(pg: svbool_t, op: svuint16_t) -> u16 { } #[doc = "Bitwise inclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orv))] @@ -30807,7 +30807,7 @@ pub fn svorv_u32(pg: svbool_t, op: svuint32_t) -> u32 { } #[doc = "Bitwise inclusive OR reduction to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svorv[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(orv))] @@ -30816,7 +30816,7 @@ pub fn svorv_u64(pg: svbool_t, op: svuint64_t) -> u64 { } #[doc = "Set all predicate elements to false"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfalse[_b])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svpfalse_b() -> svbool_t { @@ -30827,7 +30827,7 @@ pub fn svpfalse_b() -> svbool_t { } #[doc = "Set the first active predicate element to true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpfirst[_b])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pfirst))] @@ -30840,7 +30840,7 @@ pub fn svpfirst_b(pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Find next active predicate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pnext))] @@ -30853,7 +30853,7 @@ pub fn svpnext_b8(pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Find next active predicate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b16)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pnext))] @@ -30866,7 +30866,7 @@ pub fn svpnext_b16(pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Find next active predicate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pnext))] @@ -30879,7 +30879,7 @@ pub fn svpnext_b32(pg: svbool_t, op: svbool_t) -> svbool_t { } #[doc = "Find next active predicate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpnext_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pnext))] @@ -30894,7 +30894,7 @@ pub fn svpnext_b64(pg: svbool_t, op: svbool_t) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -30909,7 +30909,7 @@ pub unsafe fn svprfb(pg: svbool_t, base: *const T) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -30924,7 +30924,7 @@ pub unsafe fn svprfh(pg: svbool_t, base: *const T) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -30939,7 +30939,7 @@ pub unsafe fn svprfw(pg: svbool_t, base: *const T) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -30954,7 +30954,7 @@ pub unsafe fn svprfd(pg: svbool_t, base: *const T) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s32]offset)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -30986,7 +30986,7 @@ pub unsafe fn svprfb_gather_s32offset( #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s32]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31018,7 +31018,7 @@ pub unsafe fn svprfh_gather_s32index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s32]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31050,7 +31050,7 @@ pub unsafe fn svprfw_gather_s32index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s32]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31082,7 +31082,7 @@ pub unsafe fn svprfd_gather_s32index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[s64]offset)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation 
for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31114,7 +31114,7 @@ pub unsafe fn svprfb_gather_s64offset( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[s64]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31146,7 +31146,7 @@ pub unsafe fn svprfh_gather_s64index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[s64]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31178,7 +31178,7 @@ pub unsafe fn svprfw_gather_s64index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[s64]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } 
, T = i64))] @@ -31210,7 +31210,7 @@ pub unsafe fn svprfd_gather_s64index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u32]offset)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31242,7 +31242,7 @@ pub unsafe fn svprfb_gather_u32offset( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u32]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31274,7 +31274,7 @@ pub unsafe fn svprfh_gather_u32index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u32]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31306,7 +31306,7 @@ pub unsafe fn svprfw_gather_u32index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u32]index)"] #[doc = "## Safety"] #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31338,7 +31338,7 @@ pub unsafe fn svprfd_gather_u32index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_gather_[u64]offset)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31353,7 +31353,7 @@ pub unsafe fn svprfb_gather_u64offset( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_gather_[u64]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31368,7 +31368,7 @@ pub unsafe fn svprfh_gather_u64index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_gather_[u64]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31383,7 +31383,7 @@ pub unsafe fn svprfw_gather_u64index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_gather_[u64]index)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31399,7 +31399,7 @@ pub unsafe fn svprfd_gather_u64index( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31418,7 +31418,7 @@ pub unsafe fn svprfb_gather_u32base(pg: svbool_t, bases: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] @@ 
-31437,7 +31437,7 @@ pub unsafe fn svprfh_gather_u32base(pg: svbool_t, bases: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31456,7 +31456,7 @@ pub unsafe fn svprfw_gather_u32base(pg: svbool_t, bases: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31475,7 +31475,7 @@ pub unsafe fn svprfd_gather_u32base(pg: svbool_t, bases: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31494,7 +31494,7 @@ pub unsafe fn 
svprfb_gather_u64base(pg: svbool_t, bases: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31513,7 +31513,7 @@ pub unsafe fn svprfh_gather_u64base(pg: svbool_t, bases: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31532,7 +31532,7 @@ pub unsafe fn svprfw_gather_u64base(pg: svbool_t, bases: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31551,7 +31551,7 @@ pub unsafe fn svprfd_gather_u64base(pg: svbool_t, 
bases: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31574,7 +31574,7 @@ pub unsafe fn svprfb_gather_u32base_offset( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31597,7 +31597,7 @@ pub unsafe fn svprfh_gather_u32base_index( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31620,7 +31620,7 @@ pub unsafe fn svprfw_gather_u32base_index( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31643,7 +31643,7 @@ pub unsafe fn svprfd_gather_u32base_index( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31666,7 +31666,7 @@ pub unsafe fn svprfb_gather_u64base_offset( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31689,7 +31689,7 @@ pub unsafe fn svprfh_gather_u64base_index( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed 
by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31712,7 +31712,7 @@ pub unsafe fn svprfw_gather_u64base_index( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP }))] @@ -31734,7 +31734,7 @@ pub unsafe fn svprfd_gather_u64base_index( #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfb_vnum)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfb , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31745,7 +31745,7 @@ pub unsafe fn svprfb_vnum(pg: svbool_t, base: *const T, vn #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfh_vnum)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfh , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31756,7 +31756,7 @@ pub unsafe fn svprfh_vnum(pg: svbool_t, base: *const T, vn #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfw_vnum)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfw , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31767,7 +31767,7 @@ pub unsafe fn svprfw_vnum(pg: svbool_t, base: *const T, vn #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svprfd_vnum)"] #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (prfd , OP = { svprfop :: SV_PLDL1KEEP } , T = i64))] @@ -31776,7 +31776,7 @@ pub unsafe fn svprfd_vnum(pg: svbool_t, base: *const T, vn } #[doc = "Test whether any active element is true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_any)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ptest))] @@ -31792,7 +31792,7 @@ pub fn svptest_any(pg: svbool_t, op: svbool_t) -> bool { } #[doc = "Test whether first active element is true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_first)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ptest))] @@ -31808,7 +31808,7 @@ pub fn svptest_first(pg: svbool_t, op: svbool_t) -> bool { } #[doc = "Test whether last active element is true"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptest_last)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ptest))] @@ -31824,7 +31824,7 @@ pub fn svptest_last(pg: svbool_t, op: svbool_t) -> bool { } #[doc = "Set predicate elements to true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ptrue))] @@ -31833,7 +31833,7 @@ pub fn svptrue_b8() -> svbool_t { } #[doc = "Set predicate elements to true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ptrue))] @@ -31842,7 +31842,7 @@ pub fn svptrue_b16() -> svbool_t { } #[doc = "Set predicate elements to true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ptrue))] @@ -31851,7 +31851,7 @@ pub fn svptrue_b32() -> svbool_t { } #[doc = "Set predicate elements to true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ptrue))] @@ -31860,7 +31860,7 @@ pub fn svptrue_b64() -> svbool_t { } #[doc = "Set predicate elements to true"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] @@ -31873,7 +31873,7 @@ pub fn svptrue_pat_b8() -> svbool_t { } #[doc = "Set predicate elements to true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] @@ -31886,7 +31886,7 @@ pub fn svptrue_pat_b16() -> svbool_t { } #[doc = "Set predicate elements to true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] @@ -31899,7 +31899,7 @@ pub fn svptrue_pat_b32() -> svbool_t { } #[doc = "Set predicate elements to true"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svptrue_pat_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (ptrue , PATTERN = { svpattern :: SV_ALL }))] @@ -31912,7 +31912,7 @@ pub fn svptrue_pat_b64() -> svbool_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -31928,7 +31928,7 @@ pub 
fn svqadd_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -31937,7 +31937,7 @@ pub fn svqadd_n_s8(op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -31953,7 +31953,7 @@ pub fn svqadd_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -31962,7 +31962,7 @@ pub fn svqadd_n_s16(op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -31978,7 +31978,7 @@ pub fn svqadd_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -31987,7 +31987,7 @@ pub fn svqadd_n_s32(op1: 
svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -32003,7 +32003,7 @@ pub fn svqadd_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -32012,7 +32012,7 @@ pub fn svqadd_n_s64(op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -32028,7 +32028,7 @@ pub fn svqadd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -32037,7 +32037,7 @@ pub fn svqadd_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -32053,7 +32053,7 @@ pub fn svqadd_u16(op1: svuint16_t, op2: 
svuint16_t) -> svuint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -32062,7 +32062,7 @@ pub fn svqadd_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -32078,7 +32078,7 @@ pub fn svqadd_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -32087,7 +32087,7 @@ pub fn svqadd_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -32103,7 +32103,7 @@ pub fn svqadd_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -32112,7 +32112,7 @@ pub fn svqadd_n_u64(op1: svuint64_t, 
op2: u64) -> svuint64_t { } #[doc = "Saturating decrement by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] @@ -32121,7 +32121,7 @@ pub fn svqdecb_n_s32(op: i32) -> i32 { } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] @@ -32130,7 +32130,7 @@ pub fn svqdech_n_s32(op: i32) -> i32 { } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] @@ -32139,7 +32139,7 @@ pub fn svqdecw_n_s32(op: i32) -> i32 { } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] @@ -32148,7 +32148,7 @@ pub fn svqdecd_n_s32(op: i32) -> i32 { } #[doc = "Saturating decrement by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecb, IMM_FACTOR = 1))] @@ -32157,7 +32157,7 @@ pub fn svqdecb_n_s64(op: i64) -> i64 { } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] @@ -32166,7 +32166,7 @@ pub fn svqdech_n_s64(op: i64) -> i64 { } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] @@ -32175,7 +32175,7 @@ pub fn svqdecw_n_s64(op: i64) -> i64 { } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] @@ -32184,7 +32184,7 @@ pub fn svqdecd_n_s64(op: i64) -> i64 { } #[doc = "Saturating decrement by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] @@ -32193,7 +32193,7 @@ pub fn svqdecb_n_u32(op: u32) -> u32 { } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] @@ -32202,7 +32202,7 @@ pub fn svqdech_n_u32(op: u32) -> u32 { } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] @@ -32211,7 +32211,7 @@ pub fn svqdecw_n_u32(op: u32) -> u32 { } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] @@ -32220,7 +32220,7 @@ pub fn svqdecd_n_u32(op: u32) -> u32 { } #[doc = "Saturating decrement by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecb, IMM_FACTOR = 1))] @@ -32229,7 +32229,7 @@ pub fn svqdecb_n_u64(op: u64) -> u64 { } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] @@ 
-32238,7 +32238,7 @@ pub fn svqdech_n_u64(op: u64) -> u64 { } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] @@ -32247,7 +32247,7 @@ pub fn svqdecw_n_u64(op: u64) -> u64 { } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] @@ -32256,7 +32256,7 @@ pub fn svqdecd_n_u64(op: u64) -> u64 { } #[doc = "Saturating decrement by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32270,7 +32270,7 @@ pub fn svqdecb_pat_n_s32(op: i3 } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32284,7 +32284,7 @@ pub fn svqdech_pat_n_s32(op: i3 } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32298,7 +32298,7 @@ pub fn svqdecw_pat_n_s32(op: i3 } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32312,7 +32312,7 @@ pub fn svqdecd_pat_n_s32(op: i3 } #[doc = "Saturating decrement by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32326,7 +32326,7 @@ pub fn svqdecb_pat_n_s64(op: i6 } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32340,7 +32340,7 @@ pub fn svqdech_pat_n_s64(op: i6 } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_s64])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32354,7 +32354,7 @@ pub fn svqdecw_pat_n_s64(op: i6 } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32368,7 +32368,7 @@ pub fn svqdecd_pat_n_s64(op: i6 } #[doc = "Saturating decrement by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32382,7 +32382,7 @@ pub fn svqdecb_pat_n_u32(op: u3 } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32396,7 +32396,7 @@ pub fn svqdech_pat_n_u32(op: u3 } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdecw 
, PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32410,7 +32410,7 @@ pub fn svqdecw_pat_n_u32(op: u3 } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32424,7 +32424,7 @@ pub fn svqdecd_pat_n_u32(op: u3 } #[doc = "Saturating decrement by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecb_pat[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdecb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32438,7 +32438,7 @@ pub fn svqdecb_pat_n_u64(op: u6 } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32452,7 +32452,7 @@ pub fn svqdech_pat_n_u64(op: u6 } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32466,7 +32466,7 @@ pub fn svqdecw_pat_n_u64(op: u6 } #[doc = "Saturating 
decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32480,7 +32480,7 @@ pub fn svqdecd_pat_n_u64(op: u6 } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32496,7 +32496,7 @@ pub fn svqdech_pat_s16( } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32512,7 +32512,7 @@ pub fn svqdecw_pat_s32( } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32528,7 +32528,7 @@ pub fn svqdecd_pat_s64( } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech_pat[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdech , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32544,7 +32544,7 @@ pub fn svqdech_pat_u16( } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw_pat[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdecw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32560,7 +32560,7 @@ pub fn svqdecw_pat_u32( } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd_pat[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqdecd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -32576,7 +32576,7 @@ pub fn svqdecd_pat_u64( } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdech, IMM_FACTOR = 1))] @@ -32585,7 +32585,7 @@ pub fn svqdech_s16(op: svint16_t) -> svint16_t { } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecw, IMM_FACTOR = 1))] @@ -32594,7 +32594,7 @@ pub fn svqdecw_s32(op: svint32_t) -> svint32_t { } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecd, IMM_FACTOR = 1))] @@ -32603,7 +32603,7 @@ pub fn svqdecd_s64(op: svint64_t) -> svint64_t { } #[doc = "Saturating decrement by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdech[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdech, IMM_FACTOR = 1))] @@ -32612,7 +32612,7 @@ pub fn svqdech_u16(op: svuint16_t) -> svuint16_t { } #[doc = "Saturating decrement by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecw[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecw, IMM_FACTOR = 1))] @@ -32621,7 +32621,7 @@ pub fn svqdecw_u32(op: svuint32_t) -> svuint32_t { } #[doc = "Saturating decrement by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecd[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecd, IMM_FACTOR = 1))] @@ -32630,7 +32630,7 @@ pub fn svqdecd_u64(op: svuint64_t) -> svuint64_t { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32646,7 +32646,7 @@ pub fn svqdecp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32662,7 +32662,7 @@ pub fn svqdecp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32678,7 +32678,7 @@ pub fn svqdecp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s32]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32694,7 +32694,7 @@ pub fn svqdecp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32710,7 
+32710,7 @@ pub fn svqdecp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32726,7 +32726,7 @@ pub fn svqdecp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32742,7 +32742,7 @@ pub fn svqdecp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_s64]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32758,7 +32758,7 @@ pub fn svqdecp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32774,7 +32774,7 @@ pub fn svqdecp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b16)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32790,7 +32790,7 @@ pub fn svqdecp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32806,7 +32806,7 @@ pub fn svqdecp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u32]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32822,7 +32822,7 @@ pub fn svqdecp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32838,7 +32838,7 @@ pub fn svqdecp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32854,7 +32854,7 @@ pub fn svqdecp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { } #[doc = "Saturating decrement by active element count"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32870,7 +32870,7 @@ pub fn svqdecp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_n_u64]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32886,7 +32886,7 @@ pub fn svqdecp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32899,7 +32899,7 @@ pub fn svqdecp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32912,7 +32912,7 @@ pub fn svqdecp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdecp))] @@ -32925,7 
+32925,7 @@ pub fn svqdecp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32938,7 +32938,7 @@ pub fn svqdecp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32951,7 +32951,7 @@ pub fn svqdecp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { } #[doc = "Saturating decrement by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdecp[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqdecp))] @@ -32964,7 +32964,7 @@ pub fn svqdecp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { } #[doc = "Saturating increment by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] @@ -32973,7 +32973,7 @@ pub fn svqincb_n_s32(op: i32) -> i32 { } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s32])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] @@ -32982,7 +32982,7 @@ pub fn svqinch_n_s32(op: i32) -> i32 { } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] @@ -32991,7 +32991,7 @@ pub fn svqincw_n_s32(op: i32) -> i32 { } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] @@ -33000,7 +33000,7 @@ pub fn svqincd_n_s32(op: i32) -> i32 { } #[doc = "Saturating increment by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincb, IMM_FACTOR = 1))] @@ -33009,7 +33009,7 @@ pub fn svqincb_n_s64(op: i64) -> i64 { } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] @@ -33018,7 +33018,7 @@ pub fn svqinch_n_s64(op: i64) -> i64 { } #[doc = "Saturating increment by number of word elements"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] @@ -33027,7 +33027,7 @@ pub fn svqincw_n_s64(op: i64) -> i64 { } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] @@ -33036,7 +33036,7 @@ pub fn svqincd_n_s64(op: i64) -> i64 { } #[doc = "Saturating increment by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] @@ -33045,7 +33045,7 @@ pub fn svqincb_n_u32(op: u32) -> u32 { } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] @@ -33054,7 +33054,7 @@ pub fn svqinch_n_u32(op: u32) -> u32 { } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincw, 
IMM_FACTOR = 1))] @@ -33063,7 +33063,7 @@ pub fn svqincw_n_u32(op: u32) -> u32 { } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] @@ -33072,7 +33072,7 @@ pub fn svqincd_n_u32(op: u32) -> u32 { } #[doc = "Saturating increment by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincb, IMM_FACTOR = 1))] @@ -33081,7 +33081,7 @@ pub fn svqincb_n_u64(op: u64) -> u64 { } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] @@ -33090,7 +33090,7 @@ pub fn svqinch_n_u64(op: u64) -> u64 { } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] @@ -33099,7 +33099,7 @@ pub fn svqincw_n_u64(op: u64) -> u64 { } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_n_u64])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] @@ -33108,7 +33108,7 @@ pub fn svqincd_n_u64(op: u64) -> u64 { } #[doc = "Saturating increment by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33122,7 +33122,7 @@ pub fn svqincb_pat_n_s32(op: i3 } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33136,7 +33136,7 @@ pub fn svqinch_pat_n_s32(op: i3 } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33150,7 +33150,7 @@ pub fn svqincw_pat_n_s32(op: i3 } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqincd , PATTERN = { 
svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33164,7 +33164,7 @@ pub fn svqincd_pat_n_s32(op: i3 } #[doc = "Saturating increment by number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33178,7 +33178,7 @@ pub fn svqincb_pat_n_s64(op: i6 } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33192,7 +33192,7 @@ pub fn svqinch_pat_n_s64(op: i6 } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33206,7 +33206,7 @@ pub fn svqincw_pat_n_s64(op: i6 } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33220,7 +33220,7 @@ pub fn svqincd_pat_n_s64(op: i6 } #[doc = "Saturating increment by 
number of byte elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33234,7 +33234,7 @@ pub fn svqincb_pat_n_u32(op: u3 } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33248,7 +33248,7 @@ pub fn svqinch_pat_n_u32(op: u3 } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33262,7 +33262,7 @@ pub fn svqincw_pat_n_u32(op: u3 } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33276,7 +33276,7 @@ pub fn svqincd_pat_n_u32(op: u3 } #[doc = "Saturating increment by number of byte elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincb_pat[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqincb , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33290,7 +33290,7 @@ pub fn svqincb_pat_n_u64(op: u6 } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33304,7 +33304,7 @@ pub fn svqinch_pat_n_u64(op: u6 } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33318,7 +33318,7 @@ pub fn svqincw_pat_n_u64(op: u6 } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33332,7 +33332,7 @@ pub fn svqincd_pat_n_u64(op: u6 } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_s16])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33348,7 +33348,7 @@ pub fn svqinch_pat_s16( } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqincw , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33364,7 +33364,7 @@ pub fn svqincw_pat_s32( } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (sqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33380,7 +33380,7 @@ pub fn svqincd_pat_s64( } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch_pat[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqinch , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33396,7 +33396,7 @@ pub fn svqinch_pat_u16( } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw_pat[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqincw , PATTERN = { svpattern :: SV_ALL } , 
IMM_FACTOR = 1))] @@ -33412,7 +33412,7 @@ pub fn svqincw_pat_u32( } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd_pat[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] # [cfg_attr (test , assert_instr (uqincd , PATTERN = { svpattern :: SV_ALL } , IMM_FACTOR = 1))] @@ -33428,7 +33428,7 @@ pub fn svqincd_pat_u64( } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqinch, IMM_FACTOR = 1))] @@ -33437,7 +33437,7 @@ pub fn svqinch_s16(op: svint16_t) -> svint16_t { } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincw, IMM_FACTOR = 1))] @@ -33446,7 +33446,7 @@ pub fn svqincw_s32(op: svint32_t) -> svint32_t { } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincd, IMM_FACTOR = 1))] @@ -33455,7 +33455,7 @@ pub fn svqincd_s64(op: svint64_t) -> svint64_t { } #[doc = "Saturating increment by number of halfword elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqinch[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqinch, IMM_FACTOR = 1))] @@ -33464,7 +33464,7 @@ pub fn svqinch_u16(op: svuint16_t) -> svuint16_t { } #[doc = "Saturating increment by number of word elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincw[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincw, IMM_FACTOR = 1))] @@ -33473,7 +33473,7 @@ pub fn svqincw_u32(op: svuint32_t) -> svuint32_t { } #[doc = "Saturating increment by number of doubleword elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincd[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincd, IMM_FACTOR = 1))] @@ -33482,7 +33482,7 @@ pub fn svqincd_u64(op: svuint64_t) -> svuint64_t { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33498,7 +33498,7 @@ pub fn svqincp_n_s32_b8(op: i32, pg: svbool_t) -> i32 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqincp))] @@ -33514,7 +33514,7 @@ pub fn svqincp_n_s32_b16(op: i32, pg: svbool_t) -> i32 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33530,7 +33530,7 @@ pub fn svqincp_n_s32_b32(op: i32, pg: svbool_t) -> i32 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s32]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33546,7 +33546,7 @@ pub fn svqincp_n_s32_b64(op: i32, pg: svbool_t) -> i32 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33562,7 +33562,7 @@ pub fn svqincp_n_s64_b8(op: i64, pg: svbool_t) -> i64 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33578,7 +33578,7 @@ pub fn svqincp_n_s64_b16(op: i64, pg: svbool_t) -> i64 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33594,7 +33594,7 @@ pub fn svqincp_n_s64_b32(op: i64, pg: svbool_t) -> i64 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_s64]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33610,7 +33610,7 @@ pub fn svqincp_n_s64_b64(op: i64, pg: svbool_t) -> i64 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33626,7 +33626,7 @@ pub fn svqincp_n_u32_b8(op: u32, pg: svbool_t) -> u32 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33642,7 +33642,7 @@ pub fn svqincp_n_u32_b16(op: u32, pg: svbool_t) -> u32 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33658,7 +33658,7 @@ pub fn svqincp_n_u32_b32(op: u32, pg: svbool_t) -> u32 { } #[doc = "Saturating increment by active 
element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u32]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33674,7 +33674,7 @@ pub fn svqincp_n_u32_b64(op: u32, pg: svbool_t) -> u32 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33690,7 +33690,7 @@ pub fn svqincp_n_u64_b8(op: u64, pg: svbool_t) -> u64 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33706,7 +33706,7 @@ pub fn svqincp_n_u64_b16(op: u64, pg: svbool_t) -> u64 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33722,7 +33722,7 @@ pub fn svqincp_n_u64_b32(op: u64, pg: svbool_t) -> u64 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_n_u64]_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uqincp))] @@ -33738,7 +33738,7 @@ pub fn svqincp_n_u64_b64(op: u64, pg: svbool_t) -> u64 { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33751,7 +33751,7 @@ pub fn svqincp_s16(op: svint16_t, pg: svbool_t) -> svint16_t { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33764,7 +33764,7 @@ pub fn svqincp_s32(op: svint32_t, pg: svbool_t) -> svint32_t { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqincp))] @@ -33777,7 +33777,7 @@ pub fn svqincp_s64(op: svint64_t, pg: svbool_t) -> svint64_t { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33790,7 +33790,7 @@ pub fn svqincp_u16(op: svuint16_t, pg: svbool_t) -> svuint16_t { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u32])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33803,7 +33803,7 @@ pub fn svqincp_u32(op: svuint32_t, pg: svbool_t) -> svuint32_t { } #[doc = "Saturating increment by active element count"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqincp[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqincp))] @@ -33816,7 +33816,7 @@ pub fn svqincp_u64(op: svuint64_t, pg: svbool_t) -> svuint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -33832,7 +33832,7 @@ pub fn svqsub_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -33841,7 +33841,7 @@ pub fn svqsub_n_s8(op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -33857,7 +33857,7 @@ pub fn svqsub_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -33866,7 +33866,7 @@ pub fn svqsub_n_s16(op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -33882,7 +33882,7 @@ pub fn svqsub_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -33891,7 +33891,7 @@ pub fn svqsub_n_s32(op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -33907,7 +33907,7 @@ pub fn svqsub_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -33916,7 +33916,7 @@ pub fn svqsub_n_s64(op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating subtract"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -33932,7 +33932,7 @@ pub fn svqsub_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -33941,7 +33941,7 @@ pub fn svqsub_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -33957,7 +33957,7 @@ pub fn svqsub_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -33966,7 +33966,7 @@ pub fn svqsub_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -33982,7 +33982,7 @@ pub fn svqsub_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Saturating 
subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -33991,7 +33991,7 @@ pub fn svqsub_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -34007,7 +34007,7 @@ pub fn svqsub_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -34016,7 +34016,7 @@ pub fn svqsub_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34029,7 +34029,7 @@ pub fn svrbit_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34038,7 +34038,7 @@ pub fn svrbit_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { } 
#[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34047,7 +34047,7 @@ pub fn svrbit_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34060,7 +34060,7 @@ pub fn svrbit_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16 } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34069,7 +34069,7 @@ pub fn svrbit_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34078,7 +34078,7 @@ pub fn svrbit_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34091,7 +34091,7 @@ pub fn svrbit_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> 
svint32 } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34100,7 +34100,7 @@ pub fn svrbit_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34109,7 +34109,7 @@ pub fn svrbit_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34122,7 +34122,7 @@ pub fn svrbit_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34131,7 +34131,7 @@ pub fn svrbit_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34140,7 +34140,7 @@ pub fn svrbit_s64_z(pg: svbool_t, op: svint64_t) -> 
svint64_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34149,7 +34149,7 @@ pub fn svrbit_u8_m(inactive: svuint8_t, pg: svbool_t, op: svuint8_t) -> svuint8_ } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34158,7 +34158,7 @@ pub fn svrbit_u8_x(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34167,7 +34167,7 @@ pub fn svrbit_u8_z(pg: svbool_t, op: svuint8_t) -> svuint8_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34176,7 +34176,7 @@ pub fn svrbit_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuin } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34185,7 +34185,7 @@ pub fn svrbit_u16_x(pg: svbool_t, op: 
svuint16_t) -> svuint16_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34194,7 +34194,7 @@ pub fn svrbit_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34203,7 +34203,7 @@ pub fn svrbit_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuin } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34212,7 +34212,7 @@ pub fn svrbit_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34221,7 +34221,7 @@ pub fn svrbit_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34230,7 +34230,7 @@ pub fn svrbit_u64_m(inactive: 
svuint64_t, pg: svbool_t, op: svuint64_t) -> svuin } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34239,7 +34239,7 @@ pub fn svrbit_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Reverse bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrbit[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rbit))] @@ -34248,7 +34248,7 @@ pub fn svrbit_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Read FFR, returning predicate of succesfully loaded elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rdffr))] @@ -34257,7 +34257,7 @@ pub fn svrdffr() -> svbool_t { } #[doc = "Read FFR, returning predicate of succesfully loaded elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrdffr_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rdffr))] @@ -34270,7 +34270,7 @@ pub fn svrdffr_z(pg: svbool_t) -> svbool_t { } #[doc = "Reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frecpe))] @@ 
-34286,7 +34286,7 @@ pub fn svrecpe_f32(op: svfloat32_t) -> svfloat32_t { } #[doc = "Reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frecpe))] @@ -34302,7 +34302,7 @@ pub fn svrecpe_f64(op: svfloat64_t) -> svfloat64_t { } #[doc = "Reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frecps))] @@ -34318,7 +34318,7 @@ pub fn svrecps_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecps[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frecps))] @@ -34334,7 +34334,7 @@ pub fn svrecps_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frecpx))] @@ -34350,7 +34350,7 @@ pub fn svrecpx_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(frecpx))] @@ -34359,7 +34359,7 @@ pub fn svrecpx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frecpx))] @@ -34368,7 +34368,7 @@ pub fn svrecpx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frecpx))] @@ -34384,7 +34384,7 @@ pub fn svrecpx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frecpx))] @@ -34393,7 +34393,7 @@ pub fn svrecpx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Reciprocal exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpx[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frecpx))] @@ -34402,7 +34402,7 @@ pub fn svrecpx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f32])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_f32(op: svfloat32_t) -> svfloat32_t { @@ -34410,7 +34410,7 @@ pub fn svreinterpret_f32_f32(op: svfloat32_t) -> svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_f64(op: svfloat64_t) -> svfloat32_t { @@ -34418,7 +34418,7 @@ pub fn svreinterpret_f32_f64(op: svfloat64_t) -> svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_s8(op: svint8_t) -> svfloat32_t { @@ -34426,7 +34426,7 @@ pub fn svreinterpret_f32_s8(op: svint8_t) -> svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_s16(op: svint16_t) -> svfloat32_t { @@ -34434,7 +34434,7 @@ pub fn svreinterpret_f32_s16(op: svint16_t) -> svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_s32(op: svint32_t) -> svfloat32_t { @@ -34442,7 +34442,7 @@ pub fn svreinterpret_f32_s32(op: svint32_t) -> 
svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_s64(op: svint64_t) -> svfloat32_t { @@ -34450,7 +34450,7 @@ pub fn svreinterpret_f32_s64(op: svint64_t) -> svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_u8(op: svuint8_t) -> svfloat32_t { @@ -34458,7 +34458,7 @@ pub fn svreinterpret_f32_u8(op: svuint8_t) -> svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_u16(op: svuint16_t) -> svfloat32_t { @@ -34466,7 +34466,7 @@ pub fn svreinterpret_f32_u16(op: svuint16_t) -> svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_u32(op: svuint32_t) -> svfloat32_t { @@ -34474,7 +34474,7 @@ pub fn svreinterpret_f32_u32(op: svuint32_t) -> svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f32[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f32_u64(op: svuint64_t) -> svfloat32_t { @@ -34482,7 +34482,7 @@ pub fn svreinterpret_f32_u64(op: svuint64_t) -> svfloat32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_f32(op: svfloat32_t) -> svfloat64_t { @@ -34490,7 +34490,7 @@ pub fn svreinterpret_f64_f32(op: svfloat32_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_f64(op: svfloat64_t) -> svfloat64_t { @@ -34498,7 +34498,7 @@ pub fn svreinterpret_f64_f64(op: svfloat64_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_s8(op: svint8_t) -> svfloat64_t { @@ -34506,7 +34506,7 @@ pub fn svreinterpret_f64_s8(op: svint8_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_s16(op: svint16_t) -> svfloat64_t { @@ -34514,7 +34514,7 @@ pub fn svreinterpret_f64_s16(op: svint16_t) -> svfloat64_t { } #[doc = 
"Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_s32(op: svint32_t) -> svfloat64_t { @@ -34522,7 +34522,7 @@ pub fn svreinterpret_f64_s32(op: svint32_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_s64(op: svint64_t) -> svfloat64_t { @@ -34530,7 +34530,7 @@ pub fn svreinterpret_f64_s64(op: svint64_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_u8(op: svuint8_t) -> svfloat64_t { @@ -34538,7 +34538,7 @@ pub fn svreinterpret_f64_u8(op: svuint8_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_u16(op: svuint16_t) -> svfloat64_t { @@ -34546,7 +34546,7 @@ pub fn svreinterpret_f64_u16(op: svuint16_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_u32(op: svuint32_t) -> svfloat64_t { @@ -34554,7 +34554,7 @@ pub fn svreinterpret_f64_u32(op: svuint32_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_f64[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_f64_u64(op: svuint64_t) -> svfloat64_t { @@ -34562,7 +34562,7 @@ pub fn svreinterpret_f64_u64(op: svuint64_t) -> svfloat64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_f32(op: svfloat32_t) -> svint8_t { @@ -34570,7 +34570,7 @@ pub fn svreinterpret_s8_f32(op: svfloat32_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_f64(op: svfloat64_t) -> svint8_t { @@ -34578,7 +34578,7 @@ pub fn svreinterpret_s8_f64(op: svfloat64_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_s8(op: svint8_t) -> svint8_t { @@ -34586,7 +34586,7 @@ pub fn svreinterpret_s8_s8(op: svint8_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_s16(op: svint16_t) -> svint8_t { @@ -34594,7 +34594,7 @@ pub fn svreinterpret_s8_s16(op: svint16_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_s32(op: svint32_t) -> svint8_t { @@ -34602,7 +34602,7 @@ pub fn svreinterpret_s8_s32(op: svint32_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_s64(op: svint64_t) -> svint8_t { @@ -34610,7 +34610,7 @@ pub fn svreinterpret_s8_s64(op: svint64_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_u8(op: svuint8_t) -> svint8_t { @@ -34618,7 +34618,7 @@ pub fn svreinterpret_s8_u8(op: svuint8_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_u16(op: 
svuint16_t) -> svint8_t { @@ -34626,7 +34626,7 @@ pub fn svreinterpret_s8_u16(op: svuint16_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_u32(op: svuint32_t) -> svint8_t { @@ -34634,7 +34634,7 @@ pub fn svreinterpret_s8_u32(op: svuint32_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s8[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s8_u64(op: svuint64_t) -> svint8_t { @@ -34642,7 +34642,7 @@ pub fn svreinterpret_s8_u64(op: svuint64_t) -> svint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s16_f32(op: svfloat32_t) -> svint16_t { @@ -34650,7 +34650,7 @@ pub fn svreinterpret_s16_f32(op: svfloat32_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s16_f64(op: svfloat64_t) -> svint16_t { @@ -34658,7 +34658,7 @@ pub fn svreinterpret_s16_f64(op: svfloat64_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s16_s8(op: svint8_t) -> svint16_t { @@ -34666,7 +34666,7 @@ pub fn svreinterpret_s16_s8(op: svint8_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s16_s16(op: svint16_t) -> svint16_t { @@ -34674,7 +34674,7 @@ pub fn svreinterpret_s16_s16(op: svint16_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s16_s32(op: svint32_t) -> svint16_t { @@ -34682,7 +34682,7 @@ pub fn svreinterpret_s16_s32(op: svint32_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s16_s64(op: svint64_t) -> svint16_t { @@ -34690,7 +34690,7 @@ pub fn svreinterpret_s16_s64(op: svint64_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn 
svreinterpret_s16_u8(op: svuint8_t) -> svint16_t { @@ -34698,7 +34698,7 @@ pub fn svreinterpret_s16_u8(op: svuint8_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s16_u16(op: svuint16_t) -> svint16_t { @@ -34706,7 +34706,7 @@ pub fn svreinterpret_s16_u16(op: svuint16_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s16_u32(op: svuint32_t) -> svint16_t { @@ -34714,7 +34714,7 @@ pub fn svreinterpret_s16_u32(op: svuint32_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s16[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s16_u64(op: svuint64_t) -> svint16_t { @@ -34722,7 +34722,7 @@ pub fn svreinterpret_s16_u64(op: svuint64_t) -> svint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s32_f32(op: svfloat32_t) -> svint32_t { @@ -34730,7 +34730,7 @@ pub fn svreinterpret_s32_f32(op: svfloat32_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s32_f64(op: svfloat64_t) -> svint32_t { @@ -34738,7 +34738,7 @@ pub fn svreinterpret_s32_f64(op: svfloat64_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s32_s8(op: svint8_t) -> svint32_t { @@ -34746,7 +34746,7 @@ pub fn svreinterpret_s32_s8(op: svint8_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s32_s16(op: svint16_t) -> svint32_t { @@ -34754,7 +34754,7 @@ pub fn svreinterpret_s32_s16(op: svint16_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s32_s32(op: svint32_t) -> svint32_t { @@ -34762,7 +34762,7 @@ pub fn svreinterpret_s32_s32(op: svint32_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn 
svreinterpret_s32_s64(op: svint64_t) -> svint32_t { @@ -34770,7 +34770,7 @@ pub fn svreinterpret_s32_s64(op: svint64_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s32_u8(op: svuint8_t) -> svint32_t { @@ -34778,7 +34778,7 @@ pub fn svreinterpret_s32_u8(op: svuint8_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s32_u16(op: svuint16_t) -> svint32_t { @@ -34786,7 +34786,7 @@ pub fn svreinterpret_s32_u16(op: svuint16_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s32_u32(op: svuint32_t) -> svint32_t { @@ -34794,7 +34794,7 @@ pub fn svreinterpret_s32_u32(op: svuint32_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s32[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s32_u64(op: svuint64_t) -> svint32_t { @@ -34802,7 +34802,7 @@ pub fn svreinterpret_s32_u64(op: svuint64_t) -> svint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s64_f32(op: svfloat32_t) -> svint64_t { @@ -34810,7 +34810,7 @@ pub fn svreinterpret_s64_f32(op: svfloat32_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s64_f64(op: svfloat64_t) -> svint64_t { @@ -34818,7 +34818,7 @@ pub fn svreinterpret_s64_f64(op: svfloat64_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s64_s8(op: svint8_t) -> svint64_t { @@ -34826,7 +34826,7 @@ pub fn svreinterpret_s64_s8(op: svint8_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s64_s16(op: svint16_t) -> svint64_t { @@ -34834,7 +34834,7 @@ pub fn svreinterpret_s64_s16(op: svint16_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn 
svreinterpret_s64_s32(op: svint32_t) -> svint64_t { @@ -34842,7 +34842,7 @@ pub fn svreinterpret_s64_s32(op: svint32_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s64_s64(op: svint64_t) -> svint64_t { @@ -34850,7 +34850,7 @@ pub fn svreinterpret_s64_s64(op: svint64_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s64_u8(op: svuint8_t) -> svint64_t { @@ -34858,7 +34858,7 @@ pub fn svreinterpret_s64_u8(op: svuint8_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s64_u16(op: svuint16_t) -> svint64_t { @@ -34866,7 +34866,7 @@ pub fn svreinterpret_s64_u16(op: svuint16_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s64_u32(op: svuint32_t) -> svint64_t { @@ -34874,7 +34874,7 @@ pub fn svreinterpret_s64_u32(op: svuint32_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_s64[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_s64_u64(op: svuint64_t) -> svint64_t { @@ -34882,7 +34882,7 @@ pub fn svreinterpret_s64_u64(op: svuint64_t) -> svint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u8_f32(op: svfloat32_t) -> svuint8_t { @@ -34890,7 +34890,7 @@ pub fn svreinterpret_u8_f32(op: svfloat32_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u8_f64(op: svfloat64_t) -> svuint8_t { @@ -34898,7 +34898,7 @@ pub fn svreinterpret_u8_f64(op: svfloat64_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u8_s8(op: svint8_t) -> svuint8_t { @@ -34906,7 +34906,7 @@ pub fn svreinterpret_u8_s8(op: svint8_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn 
svreinterpret_u8_s16(op: svint16_t) -> svuint8_t { @@ -34914,7 +34914,7 @@ pub fn svreinterpret_u8_s16(op: svint16_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u8_s32(op: svint32_t) -> svuint8_t { @@ -34922,7 +34922,7 @@ pub fn svreinterpret_u8_s32(op: svint32_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u8_s64(op: svint64_t) -> svuint8_t { @@ -34930,7 +34930,7 @@ pub fn svreinterpret_u8_s64(op: svint64_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u8_u8(op: svuint8_t) -> svuint8_t { @@ -34938,7 +34938,7 @@ pub fn svreinterpret_u8_u8(op: svuint8_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u8_u16(op: svuint16_t) -> svuint8_t { @@ -34946,7 +34946,7 @@ pub fn svreinterpret_u8_u16(op: svuint16_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u8_u32(op: svuint32_t) -> svuint8_t { @@ -34954,7 +34954,7 @@ pub fn svreinterpret_u8_u32(op: svuint32_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u8[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u8_u64(op: svuint64_t) -> svuint8_t { @@ -34962,7 +34962,7 @@ pub fn svreinterpret_u8_u64(op: svuint64_t) -> svuint8_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u16_f32(op: svfloat32_t) -> svuint16_t { @@ -34970,7 +34970,7 @@ pub fn svreinterpret_u16_f32(op: svfloat32_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u16_f64(op: svfloat64_t) -> svuint16_t { @@ -34978,7 +34978,7 @@ pub fn svreinterpret_u16_f64(op: svfloat64_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn 
svreinterpret_u16_s8(op: svint8_t) -> svuint16_t { @@ -34986,7 +34986,7 @@ pub fn svreinterpret_u16_s8(op: svint8_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u16_s16(op: svint16_t) -> svuint16_t { @@ -34994,7 +34994,7 @@ pub fn svreinterpret_u16_s16(op: svint16_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u16_s32(op: svint32_t) -> svuint16_t { @@ -35002,7 +35002,7 @@ pub fn svreinterpret_u16_s32(op: svint32_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u16_s64(op: svint64_t) -> svuint16_t { @@ -35010,7 +35010,7 @@ pub fn svreinterpret_u16_s64(op: svint64_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u16_u8(op: svuint8_t) -> svuint16_t { @@ -35018,7 +35018,7 @@ pub fn svreinterpret_u16_u8(op: svuint8_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u16_u16(op: svuint16_t) -> svuint16_t { @@ -35026,7 +35026,7 @@ pub fn svreinterpret_u16_u16(op: svuint16_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u16_u32(op: svuint32_t) -> svuint16_t { @@ -35034,7 +35034,7 @@ pub fn svreinterpret_u16_u32(op: svuint32_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u16[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u16_u64(op: svuint64_t) -> svuint16_t { @@ -35042,7 +35042,7 @@ pub fn svreinterpret_u16_u64(op: svuint64_t) -> svuint16_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u32_f32(op: svfloat32_t) -> svuint32_t { @@ -35050,7 +35050,7 @@ pub fn svreinterpret_u32_f32(op: svfloat32_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
pub fn svreinterpret_u32_f64(op: svfloat64_t) -> svuint32_t { @@ -35058,7 +35058,7 @@ pub fn svreinterpret_u32_f64(op: svfloat64_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u32_s8(op: svint8_t) -> svuint32_t { @@ -35066,7 +35066,7 @@ pub fn svreinterpret_u32_s8(op: svint8_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u32_s16(op: svint16_t) -> svuint32_t { @@ -35074,7 +35074,7 @@ pub fn svreinterpret_u32_s16(op: svint16_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u32_s32(op: svint32_t) -> svuint32_t { @@ -35082,7 +35082,7 @@ pub fn svreinterpret_u32_s32(op: svint32_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u32_s64(op: svint64_t) -> svuint32_t { @@ -35090,7 +35090,7 @@ pub fn svreinterpret_u32_s64(op: svint64_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u32_u8(op: svuint8_t) -> svuint32_t { @@ -35098,7 +35098,7 @@ pub fn svreinterpret_u32_u8(op: svuint8_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u32_u16(op: svuint16_t) -> svuint32_t { @@ -35106,7 +35106,7 @@ pub fn svreinterpret_u32_u16(op: svuint16_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u32_u32(op: svuint32_t) -> svuint32_t { @@ -35114,7 +35114,7 @@ pub fn svreinterpret_u32_u32(op: svuint32_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u32[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u32_u64(op: svuint64_t) -> svuint32_t { @@ -35122,7 +35122,7 @@ pub fn svreinterpret_u32_u64(op: svuint64_t) -> svuint32_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn 
svreinterpret_u64_f32(op: svfloat32_t) -> svuint64_t { @@ -35130,7 +35130,7 @@ pub fn svreinterpret_u64_f32(op: svfloat32_t) -> svuint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u64_f64(op: svfloat64_t) -> svuint64_t { @@ -35138,7 +35138,7 @@ pub fn svreinterpret_u64_f64(op: svfloat64_t) -> svuint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u64_s8(op: svint8_t) -> svuint64_t { @@ -35146,7 +35146,7 @@ pub fn svreinterpret_u64_s8(op: svint8_t) -> svuint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u64_s16(op: svint16_t) -> svuint64_t { @@ -35154,7 +35154,7 @@ pub fn svreinterpret_u64_s16(op: svint16_t) -> svuint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u64_s32(op: svint32_t) -> svuint64_t { @@ -35162,7 +35162,7 @@ pub fn svreinterpret_u64_s32(op: svint32_t) -> svuint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u64_s64(op: svint64_t) -> svuint64_t { @@ -35170,7 +35170,7 @@ pub fn svreinterpret_u64_s64(op: svint64_t) -> svuint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u64_u8(op: svuint8_t) -> svuint64_t { @@ -35178,7 +35178,7 @@ pub fn svreinterpret_u64_u8(op: svuint8_t) -> svuint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u64_u16(op: svuint16_t) -> svuint64_t { @@ -35186,7 +35186,7 @@ pub fn svreinterpret_u64_u16(op: svuint16_t) -> svuint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svreinterpret_u64_u32(op: svuint32_t) -> svuint64_t { @@ -35194,7 +35194,7 @@ pub fn svreinterpret_u64_u32(op: svuint32_t) -> svuint64_t { } #[doc = "Reinterpret vector contents"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svreinterpret_u64[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn 
svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t { @@ -35202,7 +35202,7 @@ pub fn svreinterpret_u64_u64(op: svuint64_t) -> svuint64_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35215,7 +35215,7 @@ pub fn svrev_b8(op: svbool_t) -> svbool_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35228,7 +35228,7 @@ pub fn svrev_b16(op: svbool_t) -> svbool_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35241,7 +35241,7 @@ pub fn svrev_b32(op: svbool_t) -> svbool_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35254,7 +35254,7 @@ pub fn svrev_b64(op: svbool_t) -> svbool_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35267,7 +35267,7 @@ pub fn 
svrev_f32(op: svfloat32_t) -> svfloat32_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35280,7 +35280,7 @@ pub fn svrev_f64(op: svfloat64_t) -> svfloat64_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35293,7 +35293,7 @@ pub fn svrev_s8(op: svint8_t) -> svint8_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35306,7 +35306,7 @@ pub fn svrev_s16(op: svint16_t) -> svint16_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35319,7 +35319,7 @@ pub fn svrev_s32(op: svint32_t) -> svint32_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35332,7 +35332,7 @@ pub fn svrev_s64(op: svint64_t) -> svint64_t { } #[doc = "Reverse all elements"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35341,7 +35341,7 @@ pub fn svrev_u8(op: svuint8_t) -> svuint8_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35350,7 +35350,7 @@ pub fn svrev_u16(op: svuint16_t) -> svuint16_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35359,7 +35359,7 @@ pub fn svrev_u32(op: svuint32_t) -> svuint32_t { } #[doc = "Reverse all elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrev[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rev))] @@ -35368,7 +35368,7 @@ pub fn svrev_u64(op: svuint64_t) -> svuint64_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35381,7 +35381,7 @@ pub fn svrevb_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16 } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35390,7 +35390,7 @@ pub fn svrevb_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35399,7 +35399,7 @@ pub fn svrevb_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35412,7 +35412,7 @@ pub fn svrevb_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32 } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35421,7 +35421,7 @@ pub fn svrevb_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35430,7 +35430,7 @@ pub fn svrevb_s32_z(pg: svbool_t, op: 
svint32_t) -> svint32_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35443,7 +35443,7 @@ pub fn svrevb_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35452,7 +35452,7 @@ pub fn svrevb_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35461,7 +35461,7 @@ pub fn svrevb_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35470,7 +35470,7 @@ pub fn svrevb_u16_m(inactive: svuint16_t, pg: svbool_t, op: svuint16_t) -> svuin } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(revb))] @@ -35479,7 +35479,7 @@ pub fn svrevb_u16_x(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35488,7 +35488,7 @@ pub fn svrevb_u16_z(pg: svbool_t, op: svuint16_t) -> svuint16_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35497,7 +35497,7 @@ pub fn svrevb_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuin } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35506,7 +35506,7 @@ pub fn svrevb_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35515,7 +35515,7 @@ pub fn svrevb_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35524,7 +35524,7 @@ pub fn svrevb_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuin } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35533,7 +35533,7 @@ pub fn svrevb_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Reverse bytes within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevb[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revb))] @@ -35542,7 +35542,7 @@ pub fn svrevb_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35555,7 +35555,7 @@ pub fn svrevh_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32 } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35564,7 +35564,7 @@ pub fn svrevh_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35573,7 +35573,7 @@ pub fn svrevh_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35586,7 +35586,7 @@ pub fn svrevh_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35595,7 +35595,7 @@ pub fn svrevh_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35604,7 +35604,7 @@ pub fn svrevh_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35613,7 +35613,7 @@ pub fn 
svrevh_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svuin } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35622,7 +35622,7 @@ pub fn svrevh_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35631,7 +35631,7 @@ pub fn svrevh_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35640,7 +35640,7 @@ pub fn svrevh_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuin } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35649,7 +35649,7 @@ pub fn svrevh_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Reverse halfwords within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevh[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revh))] @@ -35658,7 +35658,7 @@ pub fn svrevh_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Reverse words within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revw))] @@ -35671,7 +35671,7 @@ pub fn svrevw_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Reverse words within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revw))] @@ -35680,7 +35680,7 @@ pub fn svrevw_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Reverse words within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revw))] @@ -35689,7 +35689,7 @@ pub fn svrevw_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Reverse words within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revw))] @@ -35698,7 +35698,7 @@ pub fn svrevw_u64_m(inactive: svuint64_t, pg: svbool_t, op: svuint64_t) -> svuin } #[doc = "Reverse words within elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revw))] @@ -35707,7 +35707,7 @@ pub fn svrevw_u64_x(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Reverse words within elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrevw[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(revw))] @@ -35716,7 +35716,7 @@ pub fn svrevw_u64_z(pg: svbool_t, op: svuint64_t) -> svuint64_t { } #[doc = "Round to nearest, ties away from zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinta))] @@ -35729,7 +35729,7 @@ pub fn svrinta_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Round to nearest, ties away from zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinta))] @@ -35738,7 +35738,7 @@ pub fn svrinta_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round to nearest, ties away from zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinta))] @@ -35747,7 +35747,7 
@@ pub fn svrinta_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round to nearest, ties away from zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinta))] @@ -35760,7 +35760,7 @@ pub fn svrinta_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Round to nearest, ties away from zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinta))] @@ -35769,7 +35769,7 @@ pub fn svrinta_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round to nearest, ties away from zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinta[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinta))] @@ -35778,7 +35778,7 @@ pub fn svrinta_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round using current rounding mode (inexact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinti))] @@ -35791,7 +35791,7 @@ pub fn svrinti_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Round using current rounding mode (inexact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_x)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinti))] @@ -35800,7 +35800,7 @@ pub fn svrinti_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round using current rounding mode (inexact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinti))] @@ -35809,7 +35809,7 @@ pub fn svrinti_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round using current rounding mode (inexact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinti))] @@ -35822,7 +35822,7 @@ pub fn svrinti_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Round using current rounding mode (inexact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinti))] @@ -35831,7 +35831,7 @@ pub fn svrinti_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round using current rounding mode (inexact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrinti[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frinti))] @@ -35840,7 +35840,7 @@ pub fn svrinti_f64_z(pg: svbool_t, op: svfloat64_t) -> 
svfloat64_t { } #[doc = "Round towards -∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintm))] @@ -35853,7 +35853,7 @@ pub fn svrintm_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Round towards -∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintm))] @@ -35862,7 +35862,7 @@ pub fn svrintm_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round towards -∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintm))] @@ -35871,7 +35871,7 @@ pub fn svrintm_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round towards -∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintm))] @@ -35884,7 +35884,7 @@ pub fn svrintm_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Round towards -∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintm))] @@ -35893,7 
+35893,7 @@ pub fn svrintm_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round towards -∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintm[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintm))] @@ -35902,7 +35902,7 @@ pub fn svrintm_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round to nearest, ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintn))] @@ -35915,7 +35915,7 @@ pub fn svrintn_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Round to nearest, ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintn))] @@ -35924,7 +35924,7 @@ pub fn svrintn_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round to nearest, ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintn))] @@ -35933,7 +35933,7 @@ pub fn svrintn_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round to nearest, ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintn))] @@ -35946,7 +35946,7 @@ pub fn svrintn_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Round to nearest, ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintn))] @@ -35955,7 +35955,7 @@ pub fn svrintn_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round to nearest, ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintn[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintn))] @@ -35964,7 +35964,7 @@ pub fn svrintn_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round towards +∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintp))] @@ -35977,7 +35977,7 @@ pub fn svrintp_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Round towards +∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintp))] @@ -35986,7 +35986,7 @@ pub fn svrintp_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round towards +∞"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintp))] @@ -35995,7 +35995,7 @@ pub fn svrintp_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round towards +∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintp))] @@ -36008,7 +36008,7 @@ pub fn svrintp_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Round towards +∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintp))] @@ -36017,7 +36017,7 @@ pub fn svrintp_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round towards +∞"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintp[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintp))] @@ -36026,7 +36026,7 @@ pub fn svrintp_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round using current rounding mode (exact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintx))] @@ -36039,7 +36039,7 @@ pub fn svrintx_f32_m(inactive: 
svfloat32_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Round using current rounding mode (exact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintx))] @@ -36048,7 +36048,7 @@ pub fn svrintx_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round using current rounding mode (exact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintx))] @@ -36057,7 +36057,7 @@ pub fn svrintx_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round using current rounding mode (exact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintx))] @@ -36070,7 +36070,7 @@ pub fn svrintx_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Round using current rounding mode (exact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintx))] @@ -36079,7 +36079,7 @@ pub fn svrintx_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round using current rounding mode (exact)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintx[_f64]_z)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintx))] @@ -36088,7 +36088,7 @@ pub fn svrintx_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round towards zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintz))] @@ -36101,7 +36101,7 @@ pub fn svrintz_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> sv } #[doc = "Round towards zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintz))] @@ -36110,7 +36110,7 @@ pub fn svrintz_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round towards zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintz))] @@ -36119,7 +36119,7 @@ pub fn svrintz_f32_z(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Round towards zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintz))] @@ -36132,7 +36132,7 @@ pub fn svrintz_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Round towards zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintz))] @@ -36141,7 +36141,7 @@ pub fn svrintz_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Round towards zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrintz[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frintz))] @@ -36150,7 +36150,7 @@ pub fn svrintz_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frsqrte))] @@ -36166,7 +36166,7 @@ pub fn svrsqrte_f32(op: svfloat32_t) -> svfloat32_t { } #[doc = "Reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frsqrte))] @@ -36182,7 +36182,7 @@ pub fn svrsqrte_f64(op: svfloat64_t) -> svfloat64_t { } #[doc = "Reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frsqrts))] @@ -36198,7 +36198,7 @@ pub fn svrsqrts_f32(op1: svfloat32_t, op2: svfloat32_t) -> 
svfloat32_t { } #[doc = "Reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrts[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(frsqrts))] @@ -36214,7 +36214,7 @@ pub fn svrsqrts_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36227,7 +36227,7 @@ pub fn svscale_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat3 } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36236,7 +36236,7 @@ pub fn svscale_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36245,7 +36245,7 @@ pub fn svscale_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat3 } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(fscale))] @@ -36254,7 +36254,7 @@ pub fn svscale_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36263,7 +36263,7 @@ pub fn svscale_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svint32_t) -> svfloat3 } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36272,7 +36272,7 @@ pub fn svscale_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: i32) -> svfloat32_t } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36285,7 +36285,7 @@ pub fn svscale_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat6 } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36294,7 +36294,7 @@ pub fn svscale_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_x)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36303,7 +36303,7 @@ pub fn svscale_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat6 } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36312,7 +36312,7 @@ pub fn svscale_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36321,7 +36321,7 @@ pub fn svscale_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svint64_t) -> svfloat6 } #[doc = "Adjust exponent"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svscale[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fscale))] @@ -36330,7 +36330,7 @@ pub fn svscale_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: i64) -> svfloat64_t } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_b])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36339,7 +36339,7 @@ pub fn svsel_b(pg: svbool_t, op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Conditionally select elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36348,7 +36348,7 @@ pub fn svsel_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_ } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36357,7 +36357,7 @@ pub fn svsel_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_ } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36366,7 +36366,7 @@ pub fn svsel_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36375,7 +36375,7 @@ pub fn svsel_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36384,7 +36384,7 @@ pub fn svsel_s32(pg: 
svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36393,7 +36393,7 @@ pub fn svsel_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36402,7 +36402,7 @@ pub fn svsel_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36411,7 +36411,7 @@ pub fn svsel_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36420,7 +36420,7 @@ pub fn svsel_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Conditionally select elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsel[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sel))] @@ -36429,7 +36429,7 @@ pub fn svsel_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_f32(tuple: svfloat32x2_t, x: svfloat32_t) -> svfloat32x2_t { @@ -36438,7 +36438,7 @@ pub fn svset2_f32(tuple: svfloat32x2_t, x: svfloat32_t) -> } #[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_f64(tuple: svfloat64x2_t, x: svfloat64_t) -> svfloat64x2_t { @@ -36447,7 +36447,7 @@ pub fn svset2_f64(tuple: svfloat64x2_t, x: svfloat64_t) -> } #[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_s8(tuple: svint8x2_t, x: svint8_t) -> svint8x2_t { @@ -36456,7 +36456,7 @@ pub fn svset2_s8(tuple: svint8x2_t, x: svint8_t) -> svint8 } #[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_s16(tuple: svint16x2_t, x: svint16_t) -> svint16x2_t { @@ -36465,7 +36465,7 @@ pub fn svset2_s16(tuple: svint16x2_t, x: svint16_t) -> svi } 
#[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_s32(tuple: svint32x2_t, x: svint32_t) -> svint32x2_t { @@ -36474,7 +36474,7 @@ pub fn svset2_s32(tuple: svint32x2_t, x: svint32_t) -> svi } #[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_s64(tuple: svint64x2_t, x: svint64_t) -> svint64x2_t { @@ -36483,7 +36483,7 @@ pub fn svset2_s64(tuple: svint64x2_t, x: svint64_t) -> svi } #[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_u8(tuple: svuint8x2_t, x: svuint8_t) -> svuint8x2_t { @@ -36492,7 +36492,7 @@ pub fn svset2_u8(tuple: svuint8x2_t, x: svuint8_t) -> svui } #[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_u16(tuple: svuint16x2_t, x: svuint16_t) -> svuint16x2_t { @@ -36501,7 +36501,7 @@ pub fn svset2_u16(tuple: svuint16x2_t, x: svuint16_t) -> s } #[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u32])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_u32(tuple: svuint32x2_t, x: svuint32_t) -> svuint32x2_t { @@ -36510,7 +36510,7 @@ pub fn svset2_u32(tuple: svuint32x2_t, x: svuint32_t) -> s } #[doc = "Change one vector in a tuple of two vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset2[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset2_u64(tuple: svuint64x2_t, x: svuint64_t) -> svuint64x2_t { @@ -36519,7 +36519,7 @@ pub fn svset2_u64(tuple: svuint64x2_t, x: svuint64_t) -> s } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_f32(tuple: svfloat32x3_t, x: svfloat32_t) -> svfloat32x3_t { @@ -36528,7 +36528,7 @@ pub fn svset3_f32(tuple: svfloat32x3_t, x: svfloat32_t) -> } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_f64(tuple: svfloat64x3_t, x: svfloat64_t) -> svfloat64x3_t { @@ -36537,7 +36537,7 @@ pub fn svset3_f64(tuple: svfloat64x3_t, x: svfloat64_t) -> } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_s8(tuple: svint8x3_t, x: svint8_t) -> svint8x3_t { 
@@ -36546,7 +36546,7 @@ pub fn svset3_s8(tuple: svint8x3_t, x: svint8_t) -> svint8 } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_s16(tuple: svint16x3_t, x: svint16_t) -> svint16x3_t { @@ -36555,7 +36555,7 @@ pub fn svset3_s16(tuple: svint16x3_t, x: svint16_t) -> svi } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_s32(tuple: svint32x3_t, x: svint32_t) -> svint32x3_t { @@ -36564,7 +36564,7 @@ pub fn svset3_s32(tuple: svint32x3_t, x: svint32_t) -> svi } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_s64(tuple: svint64x3_t, x: svint64_t) -> svint64x3_t { @@ -36573,7 +36573,7 @@ pub fn svset3_s64(tuple: svint64x3_t, x: svint64_t) -> svi } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_u8(tuple: svuint8x3_t, x: svuint8_t) -> svuint8x3_t { @@ -36582,7 +36582,7 @@ pub fn svset3_u8(tuple: svuint8x3_t, x: svuint8_t) -> svui } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_u16(tuple: svuint16x3_t, x: svuint16_t) -> svuint16x3_t { @@ -36591,7 +36591,7 @@ pub fn svset3_u16(tuple: svuint16x3_t, x: svuint16_t) -> s } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_u32(tuple: svuint32x3_t, x: svuint32_t) -> svuint32x3_t { @@ -36600,7 +36600,7 @@ pub fn svset3_u32(tuple: svuint32x3_t, x: svuint32_t) -> s } #[doc = "Change one vector in a tuple of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset3[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset3_u64(tuple: svuint64x3_t, x: svuint64_t) -> svuint64x3_t { @@ -36609,7 +36609,7 @@ pub fn svset3_u64(tuple: svuint64x3_t, x: svuint64_t) -> s } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_f32(tuple: svfloat32x4_t, x: svfloat32_t) -> svfloat32x4_t { @@ -36618,7 +36618,7 @@ pub fn svset4_f32(tuple: svfloat32x4_t, x: svfloat32_t) -> } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_f64(tuple: svfloat64x4_t, x: svfloat64_t) -> svfloat64x4_t { @@ -36627,7 +36627,7 @@ pub fn svset4_f64(tuple: svfloat64x4_t, x: svfloat64_t) -> } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_s8(tuple: svint8x4_t, x: svint8_t) -> svint8x4_t { @@ -36636,7 +36636,7 @@ pub fn svset4_s8(tuple: svint8x4_t, x: svint8_t) -> svint8 } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_s16(tuple: svint16x4_t, x: svint16_t) -> svint16x4_t { @@ -36645,7 +36645,7 @@ pub fn svset4_s16(tuple: svint16x4_t, x: svint16_t) -> svi } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_s32(tuple: svint32x4_t, x: svint32_t) -> svint32x4_t { @@ -36654,7 +36654,7 @@ pub fn svset4_s32(tuple: svint32x4_t, x: svint32_t) -> svi } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_s64(tuple: svint64x4_t, x: svint64_t) -> svint64x4_t { @@ -36663,7 +36663,7 @@ pub fn svset4_s64(tuple: 
svint64x4_t, x: svint64_t) -> svi } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_u8(tuple: svuint8x4_t, x: svuint8_t) -> svuint8x4_t { @@ -36672,7 +36672,7 @@ pub fn svset4_u8(tuple: svuint8x4_t, x: svuint8_t) -> svui } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_u16(tuple: svuint16x4_t, x: svuint16_t) -> svuint16x4_t { @@ -36681,7 +36681,7 @@ pub fn svset4_u16(tuple: svuint16x4_t, x: svuint16_t) -> s } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_u32(tuple: svuint32x4_t, x: svuint32_t) -> svuint32x4_t { @@ -36690,7 +36690,7 @@ pub fn svset4_u32(tuple: svuint32x4_t, x: svuint32_t) -> s } #[doc = "Change one vector in a tuple of four vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svset4[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub fn svset4_u64(tuple: svuint64x4_t, x: svuint64_t) -> svuint64x4_t { @@ -36699,7 +36699,7 @@ pub fn svset4_u64(tuple: svuint64x4_t, x: svuint64_t) -> s } #[doc = "Initialize the first-fault register to all-true"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsetffr)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(setffr))] @@ -36712,7 +36712,7 @@ pub fn svsetffr() { } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36725,7 +36725,7 @@ pub fn svsplice_f32(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36738,7 +36738,7 @@ pub fn svsplice_f64(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36751,7 +36751,7 @@ pub fn svsplice_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36764,7 +36764,7 @@ 
pub fn svsplice_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36777,7 +36777,7 @@ pub fn svsplice_s32(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36790,7 +36790,7 @@ pub fn svsplice_s64(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36799,7 +36799,7 @@ pub fn svsplice_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36808,7 +36808,7 @@ pub fn svsplice_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36817,7 +36817,7 @@ pub fn svsplice_u32(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Splice two vectors under predicate control"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsplice[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(splice))] @@ -36826,7 +36826,7 @@ pub fn svsplice_u64(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Square root"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsqrt))] @@ -36839,7 +36839,7 @@ pub fn svsqrt_f32_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat32_t) -> svf } #[doc = "Square root"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsqrt))] @@ -36848,7 +36848,7 @@ pub fn svsqrt_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat32_t { } #[doc = "Square root"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsqrt))] @@ -36857,7 +36857,7 @@ pub fn svsqrt_f32_z(pg: svbool_t, 
op: svfloat32_t) -> svfloat32_t { } #[doc = "Square root"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsqrt))] @@ -36870,7 +36870,7 @@ pub fn svsqrt_f64_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat64_t) -> svf } #[doc = "Square root"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsqrt))] @@ -36879,7 +36879,7 @@ pub fn svsqrt_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { } #[doc = "Square root"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqrt[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsqrt))] @@ -36891,7 +36891,7 @@ pub fn svsqrt_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat64_t { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -36907,7 +36907,7 @@ pub unsafe fn svst1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * 
This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -36923,7 +36923,7 @@ pub unsafe fn svst1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -36939,7 +36939,7 @@ pub unsafe fn svst1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -36955,7 +36955,7 @@ pub unsafe fn svst1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -36971,7 +36971,7 @@ pub unsafe fn svst1_s32(pg: svbool_t, base: 
*mut i32, data: svint32_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -36987,7 +36987,7 @@ pub unsafe fn svst1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -36999,7 +36999,7 @@ pub unsafe fn svst1_u8(pg: svbool_t, base: *mut u8, data: svuint8_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -37011,7 +37011,7 @@ pub unsafe fn svst1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37023,7 +37023,7 @@ pub unsafe fn svst1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37035,7 +37035,7 @@ pub unsafe fn svst1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37064,7 +37064,7 @@ pub unsafe fn svst1_scatter_s32index_f32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37093,7 +37093,7 @@ pub unsafe fn svst1_scatter_s32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37110,7 +37110,7 @@ pub unsafe fn svst1_scatter_s32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37139,7 +37139,7 @@ pub unsafe fn svst1_scatter_s64index_f64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37168,7 +37168,7 @@ pub unsafe fn svst1_scatter_s64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37185,7 +37185,7 @@ pub unsafe fn svst1_scatter_s64index_u64( #[doc = "## Safety"] #[doc 
= " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37214,7 +37214,7 @@ pub unsafe fn svst1_scatter_u32index_f32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37243,7 +37243,7 @@ pub unsafe fn svst1_scatter_u32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37260,7 +37260,7 @@ pub unsafe fn svst1_scatter_u32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(st1d))] @@ -37277,7 +37277,7 @@ pub unsafe fn svst1_scatter_u64index_f64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37294,7 +37294,7 @@ pub unsafe fn svst1_scatter_u64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37311,7 +37311,7 @@ pub unsafe fn svst1_scatter_u64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37340,7 +37340,7 @@ pub unsafe fn svst1_scatter_s32offset_f32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37369,7 +37369,7 @@ pub unsafe fn svst1_scatter_s32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37386,7 +37386,7 @@ pub unsafe fn svst1_scatter_s32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37415,7 +37415,7 @@ pub unsafe fn svst1_scatter_s64offset_f64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37444,7 +37444,7 @@ pub unsafe fn svst1_scatter_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37461,7 +37461,7 @@ pub unsafe fn svst1_scatter_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37490,7 +37490,7 @@ pub unsafe fn svst1_scatter_u32offset_f32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37519,7 +37519,7 @@ pub unsafe fn svst1_scatter_u32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37536,7 +37536,7 @@ pub unsafe fn svst1_scatter_u32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the 
address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37553,7 +37553,7 @@ pub unsafe fn svst1_scatter_u64offset_f64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37570,7 +37570,7 @@ pub unsafe fn svst1_scatter_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37588,7 +37588,7 @@ pub unsafe fn svst1_scatter_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37601,7 +37601,7 @@ pub unsafe fn svst1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37614,7 +37614,7 @@ pub unsafe fn svst1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37627,7 +37627,7 @@ pub unsafe fn svst1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a 
`usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37640,7 +37640,7 @@ pub unsafe fn svst1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37653,7 +37653,7 @@ pub unsafe fn svst1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37666,7 +37666,7 @@ pub unsafe fn svst1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37684,7 +37684,7 @@ pub unsafe fn svst1_scatter_u32base_index_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37702,7 +37702,7 @@ pub unsafe fn svst1_scatter_u32base_index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37720,7 +37720,7 @@ pub unsafe fn svst1_scatter_u32base_index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37738,7 +37738,7 @@ pub unsafe fn svst1_scatter_u64base_index_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37756,7 +37756,7 @@ pub unsafe fn svst1_scatter_u64base_index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37774,7 +37774,7 @@ pub unsafe fn svst1_scatter_u64base_index_u64( #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37804,7 +37804,7 @@ pub unsafe fn svst1_scatter_u32base_offset_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37834,7 +37834,7 @@ pub unsafe fn svst1_scatter_u32base_offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ 
-37852,7 +37852,7 @@ pub unsafe fn svst1_scatter_u32base_offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37882,7 +37882,7 @@ pub unsafe fn svst1_scatter_u64base_offset_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37912,7 +37912,7 @@ pub unsafe fn svst1_scatter_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37929,7 +37929,7 @@ pub unsafe fn svst1_scatter_u64base_offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37941,7 +37941,7 @@ pub unsafe fn svst1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfl #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -37953,7 +37953,7 @@ pub unsafe fn svst1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfl #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -37965,7 +37965,7 @@ pub unsafe fn svst1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8 #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element 
(governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -37977,7 +37977,7 @@ pub unsafe fn svst1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -37989,7 +37989,7 @@ pub unsafe fn svst1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -38001,7 +38001,7 @@ pub unsafe fn svst1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38013,7 
+38013,7 @@ pub unsafe fn svst1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38025,7 +38025,7 @@ pub unsafe fn svst1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -38037,7 +38037,7 @@ pub unsafe fn svst1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1d))] @@ -38049,7 +38049,7 @@ pub unsafe fn svst1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38069,7 +38069,7 @@ pub unsafe fn svst1b_s16(pg: svbool_t, base: *mut i8, data: svint16_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38089,7 +38089,7 @@ pub unsafe fn svst1b_s32(pg: svbool_t, base: *mut i8, data: svint32_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38109,7 +38109,7 @@ pub unsafe fn svst1h_s32(pg: svbool_t, base: *mut i16, data: svint32_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38129,7 +38129,7 @@ pub unsafe fn svst1b_s64(pg: svbool_t, base: *mut i8, data: 
svint64_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38149,7 +38149,7 @@ pub unsafe fn svst1h_s64(pg: svbool_t, base: *mut i16, data: svint64_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -38169,7 +38169,7 @@ pub unsafe fn svst1w_s64(pg: svbool_t, base: *mut i32, data: svint64_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38181,7 +38181,7 @@ pub unsafe fn svst1b_u16(pg: svbool_t, base: *mut u8, data: svuint16_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38193,7 +38193,7 @@ pub unsafe fn svst1b_u32(pg: svbool_t, base: *mut u8, data: svuint32_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38205,7 +38205,7 @@ pub unsafe fn svst1h_u32(pg: svbool_t, base: *mut u16, data: svuint32_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38217,7 +38217,7 @@ pub unsafe fn svst1b_u64(pg: svbool_t, base: *mut u8, data: svuint64_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38229,7 +38229,7 @@ pub unsafe fn svst1h_u64(pg: svbool_t, base: *mut u16, data: svuint64_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -38241,7 +38241,7 @@ pub unsafe fn svst1w_u64(pg: svbool_t, base: *mut u32, data: svuint64_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38275,7 +38275,7 @@ pub unsafe fn svst1b_scatter_s32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38309,7 +38309,7 @@ pub unsafe fn svst1h_scatter_s32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38326,7 
+38326,7 @@ pub unsafe fn svst1b_scatter_s32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38343,7 +38343,7 @@ pub unsafe fn svst1h_scatter_s32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38377,7 +38377,7 @@ pub unsafe fn svst1b_scatter_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38411,7 +38411,7 @@ pub unsafe fn svst1h_scatter_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -38445,7 +38445,7 @@ pub unsafe fn svst1w_scatter_s64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38462,7 +38462,7 @@ pub unsafe fn svst1b_scatter_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38479,7 +38479,7 @@ pub unsafe fn svst1h_scatter_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -38496,7 +38496,7 @@ pub unsafe fn svst1w_scatter_s64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for 
each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38530,7 +38530,7 @@ pub unsafe fn svst1b_scatter_u32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38564,7 +38564,7 @@ pub unsafe fn svst1h_scatter_u32offset_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38581,7 +38581,7 @@ pub unsafe fn svst1b_scatter_u32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38598,7 +38598,7 @@ pub unsafe fn svst1h_scatter_u32offset_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each 
active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38615,7 +38615,7 @@ pub unsafe fn svst1b_scatter_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38632,7 +38632,7 @@ pub unsafe fn svst1h_scatter_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -38649,7 +38649,7 @@ pub unsafe fn svst1w_scatter_u64offset_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38666,7 +38666,7 @@ pub unsafe fn svst1b_scatter_u64offset_u64( #[doc = "## Safety"] #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38683,7 +38683,7 @@ pub unsafe fn svst1h_scatter_u64offset_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -38701,7 +38701,7 @@ pub unsafe fn svst1w_scatter_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38736,7 +38736,7 @@ pub unsafe fn svst1b_scatter_u32base_offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * 
Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38771,7 +38771,7 @@ pub unsafe fn svst1h_scatter_u32base_offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38789,7 +38789,7 @@ pub unsafe fn svst1b_scatter_u32base_offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38807,7 +38807,7 @@ pub unsafe fn svst1h_scatter_u32base_offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses 
the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38842,7 +38842,7 @@ pub unsafe fn svst1b_scatter_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38877,7 +38877,7 @@ pub unsafe fn svst1h_scatter_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -38912,7 +38912,7 @@ pub unsafe fn svst1w_scatter_u64base_offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each 
active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38930,7 +38930,7 @@ pub unsafe fn svst1b_scatter_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38948,7 +38948,7 @@ pub unsafe fn svst1h_scatter_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -38966,7 +38966,7 @@ pub unsafe fn svst1w_scatter_u64base_offset_u64( #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -38979,7 +38979,7 @@ pub unsafe fn svst1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -38992,7 +38992,7 @@ pub unsafe fn svst1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -39005,7 +39005,7 @@ pub unsafe fn svst1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39018,7 +39018,7 @@ pub unsafe fn svst1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -39031,7 +39031,7 @@ pub unsafe fn svst1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or 
[`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39044,7 +39044,7 @@ pub unsafe fn svst1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39057,7 +39057,7 @@ pub unsafe fn svst1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -39070,7 +39070,7 @@ pub unsafe fn svst1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the 
calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39083,7 +39083,7 @@ pub unsafe fn svst1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39095,7 +39095,7 @@ pub unsafe fn svst1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -39107,7 +39107,7 @@ pub unsafe fn svst1b_vnum_s16(pg: svbool_t, base: *mut i8, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -39119,7 +39119,7 @@ pub unsafe fn svst1b_vnum_s32(pg: svbool_t, base: *mut i8, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39131,7 +39131,7 @@ pub unsafe fn svst1h_vnum_s32(pg: svbool_t, base: *mut i16, vnum: i64, data: svi #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -39143,7 +39143,7 @@ pub unsafe fn svst1b_vnum_s64(pg: svbool_t, base: *mut i8, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39155,7 +39155,7 @@ pub unsafe fn svst1h_vnum_s64(pg: svbool_t, base: *mut i16, vnum: i64, data: svi #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39167,7 +39167,7 @@ pub unsafe fn svst1w_vnum_s64(pg: svbool_t, base: *mut i32, vnum: i64, data: svi #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -39179,7 +39179,7 @@ pub unsafe fn svst1b_vnum_u16(pg: svbool_t, base: *mut u8, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -39191,7 +39191,7 @@ pub unsafe fn svst1b_vnum_u32(pg: svbool_t, base: *mut u8, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39203,7 +39203,7 @@ pub unsafe fn svst1h_vnum_u32(pg: svbool_t, base: *mut u16, vnum: i64, data: svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1b))] @@ -39215,7 +39215,7 @@ pub unsafe fn svst1b_vnum_u64(pg: svbool_t, base: *mut u8, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39227,7 +39227,7 @@ pub unsafe fn svst1h_vnum_u64(pg: svbool_t, base: *mut u16, vnum: i64, data: svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39239,7 +39239,7 @@ pub unsafe fn svst1w_vnum_u64(pg: svbool_t, base: *mut u32, vnum: i64, data: svu #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39273,7 +39273,7 @@ pub unsafe fn svst1h_scatter_s32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39290,7 +39290,7 @@ pub unsafe fn svst1h_scatter_s32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39324,7 +39324,7 @@ pub unsafe fn svst1h_scatter_s64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39358,7 +39358,7 @@ pub unsafe fn svst1w_scatter_s64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39375,7 +39375,7 @@ pub unsafe fn svst1h_scatter_s64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences 
and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39392,7 +39392,7 @@ pub unsafe fn svst1w_scatter_s64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39426,7 +39426,7 @@ pub unsafe fn svst1h_scatter_u32index_s32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39443,7 +39443,7 @@ pub unsafe fn svst1h_scatter_u32index_u32( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39460,7 +39460,7 @@ pub unsafe fn svst1h_scatter_u64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for 
the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39477,7 +39477,7 @@ pub unsafe fn svst1w_scatter_u64index_s64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39494,7 +39494,7 @@ pub unsafe fn svst1h_scatter_u64index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39512,7 +39512,7 @@ pub unsafe fn svst1w_scatter_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39530,7 +39530,7 @@ pub unsafe fn svst1h_scatter_u32base_index_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39548,7 +39548,7 @@ pub unsafe fn svst1h_scatter_u32base_index_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39566,7 +39566,7 @@ pub unsafe fn svst1h_scatter_u64base_index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before 
using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39584,7 +39584,7 @@ pub unsafe fn svst1w_scatter_u64base_index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1h))] @@ -39602,7 +39602,7 @@ pub unsafe fn svst1h_scatter_u64base_index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st1w))] @@ -39619,7 +39619,7 @@ pub unsafe fn svst1w_scatter_u64base_index_u64( #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2w))] @@ -39640,7 +39640,7 @@ pub unsafe fn svst2_f32(pg: svbool_t, base: *mut f32, data: svfloat32x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2d))] @@ -39661,7 +39661,7 @@ pub unsafe fn svst2_f64(pg: svbool_t, base: *mut f64, data: svfloat64x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2b))] @@ -39677,7 +39677,7 @@ pub unsafe fn svst2_s8(pg: svbool_t, base: *mut i8, data: svint8x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2h))] @@ -39698,7 +39698,7 @@ pub unsafe fn svst2_s16(pg: svbool_t, base: *mut i16, data: svint16x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address 
calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2w))] @@ -39719,7 +39719,7 @@ pub unsafe fn svst2_s32(pg: svbool_t, base: *mut i32, data: svint32x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2d))] @@ -39740,7 +39740,7 @@ pub unsafe fn svst2_s64(pg: svbool_t, base: *mut i64, data: svint64x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2b))] @@ -39752,7 +39752,7 @@ pub unsafe fn svst2_u8(pg: svbool_t, base: *mut u8, data: svuint8x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(st2h))] @@ -39764,7 +39764,7 @@ pub unsafe fn svst2_u16(pg: svbool_t, base: *mut u16, data: svuint16x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2w))] @@ -39776,7 +39776,7 @@ pub unsafe fn svst2_u32(pg: svbool_t, base: *mut u32, data: svuint32x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2d))] @@ -39788,7 +39788,7 @@ pub unsafe fn svst2_u64(pg: svbool_t, base: *mut u64, data: svuint64x2_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2w))] @@ -39800,7 +39800,7 @@ pub unsafe fn svst2_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfl #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2d))] @@ -39812,7 +39812,7 @@ pub unsafe fn svst2_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfl #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2b))] @@ -39824,7 +39824,7 @@ pub unsafe fn svst2_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8 #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2h))] @@ -39836,7 +39836,7 @@ pub unsafe fn svst2_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2w))] @@ -39848,7 +39848,7 @@ pub unsafe fn 
svst2_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2d))] @@ -39860,7 +39860,7 @@ pub unsafe fn svst2_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2b))] @@ -39872,7 +39872,7 @@ pub unsafe fn svst2_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2h))] @@ -39884,7 +39884,7 @@ pub unsafe fn svst2_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated 
address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2w))] @@ -39896,7 +39896,7 @@ pub unsafe fn svst2_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st2d))] @@ -39908,7 +39908,7 @@ pub unsafe fn svst2_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3w))] @@ -39936,7 +39936,7 @@ pub unsafe fn svst3_f32(pg: svbool_t, base: *mut f32, data: svfloat32x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3d))] @@ -39964,7 +39964,7 @@ pub unsafe fn svst3_f64(pg: svbool_t, base: *mut f64, data: 
svfloat64x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3b))] @@ -39986,7 +39986,7 @@ pub unsafe fn svst3_s8(pg: svbool_t, base: *mut i8, data: svint8x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3h))] @@ -40014,7 +40014,7 @@ pub unsafe fn svst3_s16(pg: svbool_t, base: *mut i16, data: svint16x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3w))] @@ -40042,7 +40042,7 @@ pub unsafe fn svst3_s32(pg: svbool_t, base: *mut i32, data: svint32x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3d))] @@ -40070,7 +40070,7 @@ pub unsafe fn svst3_s64(pg: svbool_t, base: *mut i64, data: svint64x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3b))] @@ -40082,7 +40082,7 @@ pub unsafe fn svst3_u8(pg: svbool_t, base: *mut u8, data: svuint8x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3h))] @@ -40094,7 +40094,7 @@ pub unsafe fn svst3_u16(pg: svbool_t, base: *mut u16, data: svuint16x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3w))] @@ -40106,7 +40106,7 @@ pub unsafe fn svst3_u32(pg: svbool_t, base: *mut u32, data: svuint32x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3d))] @@ -40118,7 +40118,7 @@ pub unsafe fn svst3_u64(pg: svbool_t, base: *mut u64, data: svuint64x3_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3w))] @@ -40130,7 +40130,7 @@ pub unsafe fn svst3_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfl #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3d))] @@ -40142,7 +40142,7 @@ pub unsafe fn svst3_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfl #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3b))] @@ -40154,7 +40154,7 @@ pub unsafe fn svst3_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8 #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3h))] @@ -40166,7 +40166,7 @@ pub unsafe fn svst3_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3w))] @@ -40178,7 +40178,7 @@ pub unsafe fn svst3_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3d))] @@ -40190,7 +40190,7 @@ pub unsafe fn svst3_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3b))] @@ -40202,7 +40202,7 @@ pub unsafe fn svst3_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3h))] @@ -40214,7 +40214,7 @@ pub unsafe fn svst3_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3w))] @@ -40226,7 +40226,7 @@ pub unsafe fn svst3_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st3d))] @@ -40238,7 +40238,7 @@ pub unsafe fn svst3_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4w))] @@ -40268,7 +40268,7 @@ pub unsafe fn svst4_f32(pg: svbool_t, base: *mut f32, data: svfloat32x4_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed 
by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4d))] @@ -40298,7 +40298,7 @@ pub unsafe fn svst4_f64(pg: svbool_t, base: *mut f64, data: svfloat64x4_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4b))] @@ -40328,7 +40328,7 @@ pub unsafe fn svst4_s8(pg: svbool_t, base: *mut i8, data: svint8x4_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4h))] @@ -40358,7 +40358,7 @@ pub unsafe fn svst4_s16(pg: svbool_t, base: *mut i16, data: svint16x4_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4w))] @@ -40388,7 +40388,7 @@ pub unsafe fn svst4_s32(pg: svbool_t, base: *mut i32, data: svint32x4_t) { #[doc = "## Safety"] #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4d))] @@ -40418,7 +40418,7 @@ pub unsafe fn svst4_s64(pg: svbool_t, base: *mut i64, data: svint64x4_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4b))] @@ -40430,7 +40430,7 @@ pub unsafe fn svst4_u8(pg: svbool_t, base: *mut u8, data: svuint8x4_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4h))] @@ -40442,7 +40442,7 @@ pub unsafe fn svst4_u16(pg: svbool_t, base: *mut u16, data: svuint16x4_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4w))] @@ -40454,7 +40454,7 @@ pub unsafe fn svst4_u32(pg: svbool_t, base: *mut u32, data: svuint32x4_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4d))] @@ -40466,7 +40466,7 @@ pub unsafe fn svst4_u64(pg: svbool_t, base: *mut u64, data: svuint64x4_t) { #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4w))] @@ -40478,7 +40478,7 @@ pub unsafe fn svst4_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: svfl #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4d))] @@ -40490,7 +40490,7 @@ pub unsafe fn svst4_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: svfl #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4b))] @@ -40502,7 +40502,7 @@ pub unsafe fn svst4_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svint8 #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4h))] @@ -40514,7 +40514,7 @@ pub unsafe fn svst4_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4w))] @@ -40526,7 +40526,7 @@ pub unsafe fn svst4_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4d))] @@ -40538,7 +40538,7 @@ pub unsafe fn svst4_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: svin #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4b))] @@ -40550,7 +40550,7 @@ pub unsafe fn svst4_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svuint #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4h))] @@ -40562,7 +40562,7 @@ pub unsafe fn svst4_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4w))] @@ -40574,7 +40574,7 @@ pub unsafe fn svst4_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: svui #[doc = "## Safety"] #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`). 
In particular, note that `vnum` is scaled by the vector length, `VL`, which is not known at compile time."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(st4d))] @@ -40587,7 +40587,7 @@ pub unsafe fn svst4_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: svui #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -40604,7 +40604,7 @@ pub unsafe fn svstnt1_f32(pg: svbool_t, base: *mut f32, data: svfloat32_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -40621,7 +40621,7 @@ 
pub unsafe fn svstnt1_f64(pg: svbool_t, base: *mut f64, data: svfloat64_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -40638,7 +40638,7 @@ pub unsafe fn svstnt1_s8(pg: svbool_t, base: *mut i8, data: svint8_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -40655,7 +40655,7 @@ pub unsafe fn svstnt1_s16(pg: svbool_t, base: *mut i16, data: svint16_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and 
[explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -40672,7 +40672,7 @@ pub unsafe fn svstnt1_s32(pg: svbool_t, base: *mut i32, data: svint32_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -40689,7 +40689,7 @@ pub unsafe fn svstnt1_s64(pg: svbool_t, base: *mut i64, data: svint64_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -40702,7 +40702,7 @@ pub unsafe fn svstnt1_u8(pg: svbool_t, base: *mut 
u8, data: svuint8_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -40715,7 +40715,7 @@ pub unsafe fn svstnt1_u16(pg: svbool_t, base: *mut u16, data: svuint16_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -40728,7 +40728,7 @@ pub unsafe fn svstnt1_u32(pg: svbool_t, base: *mut u32, data: svuint32_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -40741,7 +40741,7 @@ pub unsafe fn svstnt1_u64(pg: svbool_t, base: *mut u64, data: svuint64_t) { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -40754,7 +40754,7 @@ pub unsafe fn svstnt1_vnum_f32(pg: svbool_t, base: *mut f32, vnum: i64, data: sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -40767,7 +40767,7 @@ pub unsafe fn svstnt1_vnum_f64(pg: svbool_t, base: *mut f64, vnum: i64, data: sv #[doc 
= " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -40780,7 +40780,7 @@ pub unsafe fn svstnt1_vnum_s8(pg: svbool_t, base: *mut i8, vnum: i64, data: svin #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -40793,7 +40793,7 @@ pub unsafe fn svstnt1_vnum_s16(pg: svbool_t, base: *mut i16, vnum: i64, data: sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -40806,7 +40806,7 @@ pub unsafe fn svstnt1_vnum_s32(pg: svbool_t, base: *mut i32, vnum: i64, data: sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -40819,7 +40819,7 @@ pub unsafe fn svstnt1_vnum_s64(pg: svbool_t, base: *mut i64, vnum: i64, data: sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -40832,7 +40832,7 @@ pub unsafe fn svstnt1_vnum_u8(pg: svbool_t, base: *mut u8, vnum: i64, data: svui 
#[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -40845,7 +40845,7 @@ pub unsafe fn svstnt1_vnum_u16(pg: svbool_t, base: *mut u16, vnum: i64, data: sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -40858,7 +40858,7 @@ pub unsafe fn svstnt1_vnum_u32(pg: svbool_t, base: *mut u32, vnum: i64, data: sv #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -40867,7 +40867,7 @@ pub unsafe fn svstnt1_vnum_u64(pg: svbool_t, base: *mut u64, vnum: i64, data: sv } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40880,7 +40880,7 @@ pub fn svsub_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40889,7 +40889,7 @@ pub fn svsub_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40898,7 +40898,7 @@ pub fn svsub_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40907,7 +40907,7 @@ pub fn svsub_n_f32_x(pg: svbool_t, op1: 
svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40916,7 +40916,7 @@ pub fn svsub_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat3 } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40925,7 +40925,7 @@ pub fn svsub_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40938,7 +40938,7 @@ pub fn svsub_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40947,7 +40947,7 @@ pub fn svsub_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40956,7 +40956,7 
@@ pub fn svsub_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40965,7 +40965,7 @@ pub fn svsub_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40974,7 +40974,7 @@ pub fn svsub_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat6 } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsub))] @@ -40983,7 +40983,7 @@ pub fn svsub_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -40996,7 +40996,7 @@ pub fn svsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sub))] @@ -41005,7 +41005,7 @@ pub fn svsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41014,7 +41014,7 @@ pub fn svsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41023,7 +41023,7 @@ pub fn svsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41032,7 +41032,7 @@ pub fn svsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41041,7 +41041,7 @@ pub fn svsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sub))] @@ -41054,7 +41054,7 @@ pub fn svsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41063,7 +41063,7 @@ pub fn svsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41072,7 +41072,7 @@ pub fn svsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41081,7 +41081,7 @@ pub fn svsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41090,7 +41090,7 @@ pub fn svsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41099,7 +41099,7 @@ pub fn svsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41112,7 +41112,7 @@ pub fn svsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41121,7 +41121,7 @@ pub fn svsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41130,7 +41130,7 @@ pub fn svsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41139,7 +41139,7 @@ pub fn svsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41148,7 +41148,7 @@ pub fn svsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41157,7 +41157,7 @@ pub fn svsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41170,7 +41170,7 @@ pub fn svsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41179,7 +41179,7 @@ pub fn svsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41188,7 +41188,7 @@ pub fn svsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41197,7 +41197,7 @@ pub fn svsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41206,7 +41206,7 @@ pub fn svsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41215,7 +41215,7 @@ pub fn svsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41224,7 +41224,7 @@ pub fn svsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41233,7 +41233,7 @@ pub fn svsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_x)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41242,7 +41242,7 @@ pub fn svsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41251,7 +41251,7 @@ pub fn svsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41260,7 +41260,7 @@ pub fn svsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41269,7 +41269,7 @@ pub fn svsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41278,7 +41278,7 @@ pub fn svsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_m)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41287,7 +41287,7 @@ pub fn svsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41296,7 +41296,7 @@ pub fn svsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41305,7 +41305,7 @@ pub fn svsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41314,7 +41314,7 @@ pub fn svsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_t } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41323,7 +41323,7 @@ pub fn svsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41332,7 +41332,7 @@ pub fn svsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41341,7 +41341,7 @@ pub fn svsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41350,7 +41350,7 @@ pub fn svsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41359,7 +41359,7 @@ pub fn svsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41368,7 +41368,7 @@ pub fn svsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_t } 
#[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41377,7 +41377,7 @@ pub fn svsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41386,7 +41386,7 @@ pub fn svsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41395,7 +41395,7 @@ pub fn svsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41404,7 +41404,7 @@ pub fn svsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41413,7 +41413,7 @@ pub fn svsub_n_u64_x(pg: svbool_t, op1: 
svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41422,7 +41422,7 @@ pub fn svsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_t } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsub[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sub))] @@ -41431,7 +41431,7 @@ pub fn svsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41444,7 +41444,7 @@ pub fn svsubr_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41453,7 +41453,7 @@ pub fn svsubr_n_f32_m(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(fsubr))] @@ -41462,7 +41462,7 @@ pub fn svsubr_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41471,7 +41471,7 @@ pub fn svsubr_n_f32_x(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41480,7 +41480,7 @@ pub fn svsubr_f32_z(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41489,7 +41489,7 @@ pub fn svsubr_n_f32_z(pg: svbool_t, op1: svfloat32_t, op2: f32) -> svfloat32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41502,7 +41502,7 @@ pub fn svsubr_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41511,7 +41511,7 @@ pub fn svsubr_n_f64_m(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41520,7 +41520,7 @@ pub fn svsubr_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41529,7 +41529,7 @@ pub fn svsubr_n_f64_x(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41538,7 +41538,7 @@ pub fn svsubr_f64_z(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fsubr))] @@ -41547,7 +41547,7 @@ pub fn svsubr_n_f64_z(pg: svbool_t, op1: svfloat64_t, op2: f64) -> svfloat64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41560,7 +41560,7 @@ pub fn svsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41569,7 +41569,7 @@ pub fn svsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41578,7 +41578,7 @@ pub fn svsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41587,7 +41587,7 @@ pub fn svsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41596,7 +41596,7 @@ pub fn svsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) 
-> svint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41605,7 +41605,7 @@ pub fn svsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41618,7 +41618,7 @@ pub fn svsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41627,7 +41627,7 @@ pub fn svsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41636,7 +41636,7 @@ pub fn svsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ 
-41645,7 +41645,7 @@ pub fn svsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41654,7 +41654,7 @@ pub fn svsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41663,7 +41663,7 @@ pub fn svsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41676,7 +41676,7 @@ pub fn svsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41685,7 +41685,7 @@ pub fn svsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41694,7 +41694,7 @@ pub fn svsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41703,7 +41703,7 @@ pub fn svsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41712,7 +41712,7 @@ pub fn svsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41721,7 +41721,7 @@ pub fn svsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41734,7 +41734,7 @@ pub fn svsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41743,7 +41743,7 @@ pub fn svsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41752,7 +41752,7 @@ pub fn svsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41761,7 +41761,7 @@ pub fn svsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41770,7 +41770,7 @@ pub fn svsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41779,7 +41779,7 @@ pub fn svsubr_n_s64_z(pg: svbool_t, 
op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41788,7 +41788,7 @@ pub fn svsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41797,7 +41797,7 @@ pub fn svsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41806,7 +41806,7 @@ pub fn svsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41815,7 +41815,7 @@ pub fn svsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(subr))] @@ -41824,7 +41824,7 @@ pub fn svsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41833,7 +41833,7 @@ pub fn svsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41842,7 +41842,7 @@ pub fn svsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41851,7 +41851,7 @@ pub fn svsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41860,7 +41860,7 @@ pub fn svsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41869,7 +41869,7 @@ pub fn svsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41878,7 +41878,7 @@ pub fn svsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41887,7 +41887,7 @@ pub fn svsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41896,7 +41896,7 @@ pub fn svsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41905,7 +41905,7 @@ pub fn svsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41914,7 +41914,7 @@ pub fn svsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41923,7 +41923,7 @@ pub fn svsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41932,7 +41932,7 @@ pub fn svsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41941,7 +41941,7 @@ pub fn svsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41950,7 +41950,7 @@ pub fn svsubr_u64_m(pg: svbool_t, 
op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41959,7 +41959,7 @@ pub fn svsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41968,7 +41968,7 @@ pub fn svsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41977,7 +41977,7 @@ pub fn svsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subr))] @@ -41986,7 +41986,7 @@ pub fn svsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubr[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(subr))] @@ -41995,7 +41995,7 @@ pub fn svsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Dot product (signed × unsigned)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,i8mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sudot, IMM_INDEX = 0))] @@ -42021,7 +42021,7 @@ pub fn svsudot_lane_s32( } #[doc = "Dot product (signed × unsigned)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,i8mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usdot))] @@ -42030,7 +42030,7 @@ pub fn svsudot_s32(op1: svint32_t, op2: svint8_t, op3: svuint8_t) -> svint32_t { } #[doc = "Dot product (signed × unsigned)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsudot[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,i8mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usdot))] @@ -42039,7 +42039,7 @@ pub fn svsudot_n_s32(op1: svint32_t, op2: svint8_t, op3: u8) -> svint32_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42052,7 +42052,7 @@ pub fn svtbl_f32(data: svfloat32_t, indices: svuint32_t) -> svfloat32_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42065,7 +42065,7 @@ pub fn svtbl_f64(data: svfloat64_t, indices: svuint64_t) -> svfloat64_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42078,7 +42078,7 @@ pub fn svtbl_s8(data: svint8_t, indices: svuint8_t) -> svint8_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42091,7 +42091,7 @@ pub fn svtbl_s16(data: svint16_t, indices: svuint16_t) -> svint16_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42104,7 +42104,7 @@ pub fn svtbl_s32(data: svint32_t, indices: svuint32_t) -> svint32_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42117,7 +42117,7 @@ pub fn svtbl_s64(data: 
svint64_t, indices: svuint64_t) -> svint64_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42126,7 +42126,7 @@ pub fn svtbl_u8(data: svuint8_t, indices: svuint8_t) -> svuint8_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42135,7 +42135,7 @@ pub fn svtbl_u16(data: svuint16_t, indices: svuint16_t) -> svuint16_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42144,7 +42144,7 @@ pub fn svtbl_u32(data: svuint32_t, indices: svuint32_t) -> svuint32_t { } #[doc = "Table lookup in single-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -42153,7 +42153,7 @@ pub fn svtbl_u64(data: svuint64_t, indices: svuint64_t) -> svuint64_t { } #[doc = "Trigonometric multiply-add coefficient"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))] @@ -42170,7 +42170,7 @@ pub fn svtmad_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloa } #[doc = "Trigonometric multiply-add coefficient"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtmad[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ftmad, IMM3 = 0))] @@ -42187,7 +42187,7 @@ pub fn svtmad_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloa } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42200,7 +42200,7 @@ pub fn svtrn1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42213,7 +42213,7 @@ pub fn svtrn1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42226,7 +42226,7 @@ pub fn svtrn1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42239,7 +42239,7 @@ pub fn svtrn1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42252,7 +42252,7 @@ pub fn svtrn1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42265,7 +42265,7 @@ pub fn svtrn1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42278,7 +42278,7 @@ pub fn svtrn1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42291,7 +42291,7 @@ pub fn 
svtrn1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42304,7 +42304,7 @@ pub fn svtrn1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42317,7 +42317,7 @@ pub fn svtrn1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42326,7 +42326,7 @@ pub fn svtrn1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42335,7 +42335,7 @@ pub fn svtrn1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42344,7 +42344,7 @@ pub fn svtrn1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Interleave even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42353,7 +42353,7 @@ pub fn svtrn1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42366,7 +42366,7 @@ pub fn svtrn1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42379,7 +42379,7 @@ pub fn svtrn1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42392,7 +42392,7 @@ pub fn svtrn1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42405,7 +42405,7 @@ pub fn svtrn1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42418,7 +42418,7 @@ pub fn svtrn1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42431,7 +42431,7 @@ pub fn svtrn1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42440,7 +42440,7 @@ pub fn svtrn1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ 
-42449,7 +42449,7 @@ pub fn svtrn1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42458,7 +42458,7 @@ pub fn svtrn1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Interleave even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn1q[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn1))] @@ -42467,7 +42467,7 @@ pub fn svtrn1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42480,7 +42480,7 @@ pub fn svtrn2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42493,7 +42493,7 @@ pub fn svtrn2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42506,7 +42506,7 @@ pub fn svtrn2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42519,7 +42519,7 @@ pub fn svtrn2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42532,7 +42532,7 @@ pub fn svtrn2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42545,7 +42545,7 @@ pub fn svtrn2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42558,7 +42558,7 @@ pub fn svtrn2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42571,7 +42571,7 @@ pub fn svtrn2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42584,7 +42584,7 @@ pub fn svtrn2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42597,7 +42597,7 @@ pub fn svtrn2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42606,7 +42606,7 @@ pub fn svtrn2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42615,7 +42615,7 @@ pub fn svtrn2_u16(op1: 
svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42624,7 +42624,7 @@ pub fn svtrn2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Interleave odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42633,7 +42633,7 @@ pub fn svtrn2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42646,7 +42646,7 @@ pub fn svtrn2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42659,7 +42659,7 @@ pub fn svtrn2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42672,7 +42672,7 @@ pub fn svtrn2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42685,7 +42685,7 @@ pub fn svtrn2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42698,7 +42698,7 @@ pub fn svtrn2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42711,7 +42711,7 @@ pub fn svtrn2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42720,7 +42720,7 @@ pub fn svtrn2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42729,7 +42729,7 @@ pub fn svtrn2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42738,7 +42738,7 @@ pub fn svtrn2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Interleave odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtrn2q[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(trn2))] @@ -42747,7 +42747,7 @@ pub fn svtrn2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Trigonometric starting value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ftsmul))] @@ -42763,7 +42763,7 @@ pub fn svtsmul_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { } #[doc = "Trigonometric starting value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtsmul[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ftsmul))] @@ -42779,7 +42779,7 
@@ pub fn svtsmul_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { } #[doc = "Trigonometric select coefficient"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ftssel))] @@ -42795,7 +42795,7 @@ pub fn svtssel_f32(op1: svfloat32_t, op2: svuint32_t) -> svfloat32_t { } #[doc = "Trigonometric select coefficient"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtssel[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ftssel))] @@ -42813,7 +42813,7 @@ pub fn svtssel_f64(op1: svfloat64_t, op2: svuint64_t) -> svfloat64_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_f32() -> svfloat32x2_t { @@ -42823,7 +42823,7 @@ pub unsafe fn svundef2_f32() -> svfloat32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_f64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_f64() -> svfloat64x2_t { @@ -42833,7 +42833,7 @@ pub unsafe fn svundef2_f64() -> svfloat64x2_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s8)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_s8() -> svint8x2_t { @@ -42843,7 +42843,7 @@ pub unsafe fn svundef2_s8() -> svint8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s16)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_s16() -> svint16x2_t { @@ -42853,7 +42853,7 @@ pub unsafe fn svundef2_s16() -> svint16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_s32() -> svint32x2_t { @@ -42863,7 +42863,7 @@ pub unsafe fn svundef2_s32() -> svint32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_s64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_s64() -> svint64x2_t { @@ -42873,7 +42873,7 @@ pub unsafe fn svundef2_s64() -> svint64x2_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u8)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_u8() -> svuint8x2_t { @@ -42883,7 +42883,7 @@ pub unsafe fn svundef2_u8() -> svuint8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u16)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_u16() -> svuint16x2_t { @@ -42893,7 +42893,7 @@ pub unsafe fn svundef2_u16() -> svuint16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_u32() -> svuint32x2_t { @@ -42903,7 +42903,7 @@ pub unsafe fn svundef2_u32() -> svuint32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef2_u64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef2_u64() -> svuint64x2_t { @@ -42913,7 +42913,7 @@ pub unsafe fn svundef2_u64() -> svuint64x2_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_f32() -> svfloat32x3_t { @@ -42923,7 +42923,7 @@ pub unsafe fn svundef3_f32() -> svfloat32x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_f64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_f64() -> svfloat64x3_t { @@ -42933,7 +42933,7 @@ pub unsafe fn svundef3_f64() -> svfloat64x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s8)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_s8() -> svint8x3_t { @@ -42943,7 +42943,7 @@ pub unsafe fn svundef3_s8() -> svint8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s16)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_s16() -> svint16x3_t { @@ -42953,7 +42953,7 @@ pub unsafe fn svundef3_s16() -> svint16x3_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_s32() -> svint32x3_t { @@ -42963,7 +42963,7 @@ pub unsafe fn svundef3_s32() -> svint32x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_s64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_s64() -> svint64x3_t { @@ -42973,7 +42973,7 @@ pub unsafe fn svundef3_s64() -> svint64x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u8)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_u8() -> svuint8x3_t { @@ -42983,7 +42983,7 @@ pub unsafe fn svundef3_u8() -> svuint8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u16)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_u16() -> svuint16x3_t { @@ -42993,7 +42993,7 @@ pub unsafe fn svundef3_u16() -> svuint16x3_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_u32() -> svuint32x3_t { @@ -43003,7 +43003,7 @@ pub unsafe fn svundef3_u32() -> svuint32x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef3_u64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef3_u64() -> svuint64x3_t { @@ -43013,7 +43013,7 @@ pub unsafe fn svundef3_u64() -> svuint64x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_f32() -> svfloat32x4_t { @@ -43028,7 +43028,7 @@ pub unsafe fn svundef4_f32() -> svfloat32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_f64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_f64() -> svfloat64x4_t { @@ -43043,7 +43043,7 @@ pub unsafe fn svundef4_f64() -> svfloat64x4_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s8)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_s8() -> svint8x4_t { @@ -43053,7 +43053,7 @@ pub unsafe fn svundef4_s8() -> svint8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s16)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_s16() -> svint16x4_t { @@ -43068,7 +43068,7 @@ pub unsafe fn svundef4_s16() -> svint16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_s32() -> svint32x4_t { @@ -43083,7 +43083,7 @@ pub unsafe fn svundef4_s32() -> svint32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_s64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_s64() -> svint64x4_t { @@ -43098,7 +43098,7 @@ pub unsafe fn svundef4_s64() -> svint64x4_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u8)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_u8() -> svuint8x4_t { @@ -43108,7 +43108,7 @@ pub unsafe fn svundef4_u8() -> svuint8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u16)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_u16() -> svuint16x4_t { @@ -43123,7 +43123,7 @@ pub unsafe fn svundef4_u16() -> svuint16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_u32() -> svuint32x4_t { @@ -43138,7 +43138,7 @@ pub unsafe fn svundef4_u32() -> svuint32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef4_u64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef4_u64() -> svuint64x4_t { @@ -43153,7 +43153,7 @@ pub unsafe fn svundef4_u64() -> svuint64x4_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_f32() -> svfloat32_t { @@ -43163,7 +43163,7 @@ pub unsafe fn svundef_f32() -> svfloat32_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_f64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_f64() -> svfloat64_t { @@ -43173,7 +43173,7 @@ pub unsafe fn svundef_f64() -> svfloat64_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s8)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_s8() -> svint8_t { @@ -43183,7 +43183,7 @@ pub unsafe fn svundef_s8() -> svint8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s16)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_s16() -> svint16_t { @@ -43193,7 +43193,7 @@ pub unsafe fn svundef_s16() -> svint16_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_s32() -> svint32_t { @@ -43203,7 +43203,7 @@ pub unsafe fn svundef_s32() -> svint32_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_s64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_s64() -> svint64_t { @@ -43213,7 +43213,7 @@ pub unsafe fn svundef_s64() -> svint64_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u8)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_u8() -> svuint8_t { @@ -43223,7 +43223,7 @@ pub unsafe fn svundef_u8() -> svuint8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u16)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_u16() -> svuint16_t { @@ -43233,7 +43233,7 @@ pub unsafe fn svundef_u16() -> svuint16_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u32)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_u32() -> svuint32_t { @@ -43243,7 +43243,7 @@ pub unsafe fn svundef_u32() -> svuint32_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svundef_u64)"] #[doc = "## Safety"] #[doc = " * This creates an uninitialized value, and may be unsound (like [`core::mem::uninitialized`])."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] pub unsafe fn svundef_u64() -> svuint64_t { @@ -43251,7 +43251,7 @@ pub unsafe fn svundef_u64() -> svuint64_t { } #[doc = "Dot product (unsigned × signed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,i8mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usdot, IMM_INDEX = 0))] @@ -43277,7 +43277,7 @@ pub fn svusdot_lane_s32( } #[doc = "Dot product (unsigned × signed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,i8mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usdot))] @@ -43290,7 +43290,7 @@ pub fn svusdot_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t { } #[doc = "Dot product (unsigned × signed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusdot[_n_s32])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,i8mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usdot))] @@ -43299,7 +43299,7 @@ pub fn svusdot_n_s32(op1: svint32_t, op2: svuint8_t, op3: i8) -> svint32_t { } #[doc = "Matrix multiply-accumulate (unsigned × signed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svusmmla[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,i8mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usmmla))] @@ -43312,7 +43312,7 @@ pub fn svusmmla_s32(op1: svint32_t, op2: svuint8_t, op3: svint8_t) -> svint32_t } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43325,7 +43325,7 @@ pub fn svuzp1_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43338,7 +43338,7 @@ pub fn svuzp1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43351,7 +43351,7 @@ pub fn svuzp1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Concatenate even elements from two 
inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43364,7 +43364,7 @@ pub fn svuzp1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43377,7 +43377,7 @@ pub fn svuzp1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43390,7 +43390,7 @@ pub fn svuzp1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43403,7 +43403,7 @@ pub fn svuzp1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43416,7 
+43416,7 @@ pub fn svuzp1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43429,7 +43429,7 @@ pub fn svuzp1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43442,7 +43442,7 @@ pub fn svuzp1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43451,7 +43451,7 @@ pub fn svuzp1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43460,7 +43460,7 @@ pub fn svuzp1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43469,7 +43469,7 @@ pub fn svuzp1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Concatenate even elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43478,7 +43478,7 @@ pub fn svuzp1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43491,7 +43491,7 @@ pub fn svuzp1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43504,7 +43504,7 @@ pub fn svuzp1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43517,7 +43517,7 @@ pub fn svuzp1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43530,7 +43530,7 @@ pub fn svuzp1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43543,7 +43543,7 @@ pub fn svuzp1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43556,7 +43556,7 @@ pub fn svuzp1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43565,7 +43565,7 @@ pub fn svuzp1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uzp1))] @@ -43574,7 +43574,7 @@ pub fn svuzp1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43583,7 +43583,7 @@ pub fn svuzp1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Concatenate even quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp1q[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp1))] @@ -43592,7 +43592,7 @@ pub fn svuzp1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43605,7 +43605,7 @@ pub fn svuzp2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43618,7 +43618,7 @@ pub fn svuzp2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43631,7 +43631,7 @@ pub fn svuzp2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43644,7 +43644,7 @@ pub fn svuzp2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43657,7 +43657,7 @@ pub fn svuzp2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43670,7 +43670,7 @@ pub fn svuzp2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43683,7 +43683,7 @@ pub fn svuzp2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Concatenate odd elements from two inputs"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43696,7 +43696,7 @@ pub fn svuzp2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43709,7 +43709,7 @@ pub fn svuzp2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43722,7 +43722,7 @@ pub fn svuzp2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43731,7 +43731,7 @@ pub fn svuzp2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43740,7 +43740,7 @@ pub 
fn svuzp2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43749,7 +43749,7 @@ pub fn svuzp2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Concatenate odd elements from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43758,7 +43758,7 @@ pub fn svuzp2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Concatenate odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43771,7 +43771,7 @@ pub fn svuzp2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Concatenate odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43784,7 +43784,7 @@ pub fn svuzp2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Concatenate odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s8])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43797,7 +43797,7 @@ pub fn svuzp2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Concatenate odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43810,7 +43810,7 @@ pub fn svuzp2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Concatenate odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43823,7 +43823,7 @@ pub fn svuzp2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Concatenate odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43836,7 +43836,7 @@ pub fn svuzp2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Concatenate odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43845,7 +43845,7 @@ pub fn svuzp2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Concatenate odd quadwords from two 
inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43854,7 +43854,7 @@ pub fn svuzp2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Concatenate odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43863,7 +43863,7 @@ pub fn svuzp2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Concatenate odd quadwords from two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuzp2q[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uzp2))] @@ -43872,7 +43872,7 @@ pub fn svuzp2q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilele))] @@ -43888,7 +43888,7 @@ pub fn svwhilele_b8_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(whilele))] @@ -43904,7 +43904,7 @@ pub fn svwhilele_b16_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilele))] @@ -43920,7 +43920,7 @@ pub fn svwhilele_b32_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilele))] @@ -43936,7 +43936,7 @@ pub fn svwhilele_b64_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilele))] @@ -43952,7 +43952,7 @@ pub fn svwhilele_b8_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilele))] @@ -43968,7 +43968,7 @@ pub fn svwhilele_b16_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilele))] @@ -43984,7 +43984,7 @@ pub fn svwhilele_b32_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilele))] @@ -44000,7 +44000,7 @@ pub fn svwhilele_b64_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilels))] @@ -44016,7 +44016,7 @@ pub fn svwhilele_b8_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilels))] @@ -44032,7 +44032,7 @@ pub fn svwhilele_b16_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(whilels))] @@ -44048,7 +44048,7 @@ pub fn svwhilele_b32_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilels))] @@ -44064,7 +44064,7 @@ pub fn svwhilele_b64_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b8[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilels))] @@ -44080,7 +44080,7 @@ pub fn svwhilele_b8_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b16[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilels))] @@ -44096,7 +44096,7 @@ pub fn svwhilele_b16_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b32[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilels))] @@ -44112,7 +44112,7 @@ pub fn svwhilele_b32_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While incrementing scalar is less than or equal to"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilele_b64[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilels))] @@ -44128,7 +44128,7 @@ pub fn svwhilele_b64_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelt))] @@ -44144,7 +44144,7 @@ pub fn svwhilelt_b8_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelt))] @@ -44160,7 +44160,7 @@ pub fn svwhilelt_b16_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelt))] @@ -44176,7 +44176,7 @@ pub fn svwhilelt_b32_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelt))] @@ -44192,7 +44192,7 @@ pub 
fn svwhilelt_b64_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelt))] @@ -44208,7 +44208,7 @@ pub fn svwhilelt_b8_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelt))] @@ -44224,7 +44224,7 @@ pub fn svwhilelt_b16_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelt))] @@ -44240,7 +44240,7 @@ pub fn svwhilelt_b32_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelt))] @@ -44256,7 +44256,7 @@ pub fn svwhilelt_b64_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelo))] @@ -44272,7 +44272,7 @@ pub fn svwhilelt_b8_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelo))] @@ -44288,7 +44288,7 @@ pub fn svwhilelt_b16_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelo))] @@ -44304,7 +44304,7 @@ pub fn svwhilelt_b32_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelo))] @@ -44320,7 +44320,7 @@ pub fn svwhilelt_b64_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b8[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelo))] @@ -44336,7 +44336,7 @@ pub fn svwhilelt_b8_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b16[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelo))] @@ -44352,7 +44352,7 @@ pub fn svwhilelt_b16_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b32[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelo))] @@ -44368,7 +44368,7 @@ pub fn svwhilelt_b32_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While incrementing scalar is less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilelt_b64[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilelo))] @@ -44384,7 +44384,7 @@ pub fn svwhilelt_b64_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "Write to the first-fault register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwrffr)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(wrffr))] @@ -44397,7 +44397,7 @@ pub fn svwrffr(op: svbool_t) { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44410,7 +44410,7 @@ pub fn svzip1_b8(op1: svbool_t, op2: svbool_t) -> 
svbool_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44423,7 +44423,7 @@ pub fn svzip1_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44436,7 +44436,7 @@ pub fn svzip1_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44449,7 +44449,7 @@ pub fn svzip1_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44462,7 +44462,7 @@ pub fn svzip1_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44475,7 +44475,7 @@ pub fn svzip1_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44488,7 +44488,7 @@ pub fn svzip1_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44501,7 +44501,7 @@ pub fn svzip1_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44514,7 +44514,7 @@ pub fn svzip1_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44527,7 +44527,7 @@ pub fn svzip1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44536,7 +44536,7 @@ pub fn svzip1_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44545,7 +44545,7 @@ pub fn svzip1_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44554,7 +44554,7 @@ pub fn svzip1_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Interleave elements from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44563,7 +44563,7 @@ pub fn svzip1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(zip1))] @@ -44576,7 +44576,7 @@ pub fn svzip1q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44589,7 +44589,7 @@ pub fn svzip1q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44602,7 +44602,7 @@ pub fn svzip1q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44615,7 +44615,7 @@ pub fn svzip1q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44628,7 +44628,7 @@ pub fn svzip1q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44641,7 +44641,7 @@ pub fn svzip1q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44650,7 +44650,7 @@ pub fn svzip1q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44659,7 +44659,7 @@ pub fn svzip1q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44668,7 +44668,7 @@ pub fn svzip1q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Interleave quadwords from low halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip1q[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(zip1))] @@ -44677,7 +44677,7 @@ pub fn svzip1q_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44690,7 +44690,7 @@ pub fn svzip2_b8(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44703,7 +44703,7 @@ pub fn svzip2_b16(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44716,7 +44716,7 @@ pub fn svzip2_b32(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2_b64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44729,7 +44729,7 @@ pub fn svzip2_b64(op1: svbool_t, op2: svbool_t) -> svbool_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44742,7 +44742,7 @@ pub fn svzip2_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44755,7 +44755,7 @@ pub fn svzip2_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44768,7 +44768,7 @@ pub fn svzip2_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44781,7 +44781,7 @@ pub fn svzip2_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(zip2))] @@ -44794,7 +44794,7 @@ pub fn svzip2_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44807,7 +44807,7 @@ pub fn svzip2_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44816,7 +44816,7 @@ pub fn svzip2_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44825,7 +44825,7 @@ pub fn svzip2_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44834,7 +44834,7 @@ pub fn svzip2_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Interleave elements from high halves of two inputs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44843,7 +44843,7 @@ pub fn svzip2_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44856,7 +44856,7 @@ pub fn svzip2q_f32(op1: svfloat32_t, op2: svfloat32_t) -> svfloat32_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44869,7 +44869,7 @@ pub fn svzip2q_f64(op1: svfloat64_t, op2: svfloat64_t) -> svfloat64_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44882,7 +44882,7 @@ pub fn svzip2q_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44895,7 +44895,7 @@ pub fn svzip2q_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44908,7 +44908,7 @@ pub fn svzip2q_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44921,7 +44921,7 @@ pub fn svzip2q_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44930,7 +44930,7 @@ pub fn svzip2q_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44939,7 +44939,7 @@ pub fn svzip2q_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] @@ -44948,7 +44948,7 @@ pub fn svzip2q_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Interleave quadwords from high halves of two inputs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svzip2q[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,f64mm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(zip2))] diff --git a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs index c5b0149c9c302..e11c20e5dd07d 100644 --- a/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs +++ b/library/stdarch/crates/core_arch/src/aarch64/sve2/generated.rs @@ -14,7 +14,7 @@ use super::*; #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saba))] @@ -27,7 +27,7 @@ pub fn svaba_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saba))] @@ -36,7 +36,7 @@ pub fn svaba_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saba))] @@ -49,7 +49,7 @@ pub fn svaba_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saba))] @@ -58,7 +58,7 @@ pub fn svaba_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saba))] @@ -71,7 +71,7 @@ pub fn svaba_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saba))] @@ -80,7 +80,7 @@ pub fn svaba_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saba))] @@ -93,7 +93,7 
@@ pub fn svaba_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saba))] @@ -102,7 +102,7 @@ pub fn svaba_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaba))] @@ -115,7 +115,7 @@ pub fn svaba_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaba))] @@ -124,7 +124,7 @@ pub fn svaba_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaba))] @@ -137,7 +137,7 @@ pub fn svaba_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_ } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u16])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaba))] @@ -146,7 +146,7 @@ pub fn svaba_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaba))] @@ -159,7 +159,7 @@ pub fn svaba_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32_ } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaba))] @@ -168,7 +168,7 @@ pub fn svaba_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaba))] @@ -181,7 +181,7 @@ pub fn svaba_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_ } #[doc = "Absolute difference and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaba[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaba))] @@ -190,7 +190,7 @@ pub fn svaba_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Absolute 
difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalb))] @@ -203,7 +203,7 @@ pub fn svabalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalb))] @@ -212,7 +212,7 @@ pub fn svabalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalb))] @@ -225,7 +225,7 @@ pub fn svabalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalb))] @@ -234,7 +234,7 @@ pub fn svabalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] #[cfg_attr(test, assert_instr(sabalb))] @@ -247,7 +247,7 @@ pub fn svabalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalb))] @@ -256,7 +256,7 @@ pub fn svabalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalb))] @@ -269,7 +269,7 @@ pub fn svabalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_ } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalb))] @@ -278,7 +278,7 @@ pub fn svabalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalb))] @@ -291,7 +291,7 @@ pub fn svabalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint3 } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalb))] @@ -300,7 +300,7 @@ pub fn svabalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalb))] @@ -313,7 +313,7 @@ pub fn svabalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint6 } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalb))] @@ -322,7 +322,7 @@ pub fn svabalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalt))] @@ -335,7 +335,7 @@ pub fn svabalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sabalt))] @@ -344,7 +344,7 @@ pub fn svabalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalt))] @@ -357,7 +357,7 @@ pub fn svabalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalt))] @@ -366,7 +366,7 @@ pub fn svabalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalt))] @@ -379,7 +379,7 @@ pub fn svabalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabalt))] @@ -388,7 +388,7 @@ pub fn svabalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalt))] @@ -401,7 +401,7 @@ pub fn svabalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_ } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalt))] @@ -410,7 +410,7 @@ pub fn svabalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalt))] @@ -423,7 +423,7 @@ pub fn svabalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint3 } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalt))] @@ -432,7 +432,7 @@ pub fn svabalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uabalt))] @@ -445,7 +445,7 @@ pub fn svabalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint6 } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabalt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabalt))] @@ -454,7 +454,7 @@ pub fn svabalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlb))] @@ -467,7 +467,7 @@ pub fn svabdlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlb))] @@ -476,7 +476,7 @@ pub fn svabdlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlb))] @@ -489,7 +489,7 @@ pub fn svabdlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s32])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlb))] @@ -498,7 +498,7 @@ pub fn svabdlb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlb))] @@ -511,7 +511,7 @@ pub fn svabdlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlb))] @@ -520,7 +520,7 @@ pub fn svabdlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlb))] @@ -533,7 +533,7 @@ pub fn svabdlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlb))] @@ -542,7 +542,7 @@ pub fn svabdlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlb))] @@ -555,7 +555,7 @@ pub fn svabdlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlb))] @@ -564,7 +564,7 @@ pub fn svabdlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlb))] @@ -577,7 +577,7 @@ pub fn svabdlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Absolute difference long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlb))] @@ -586,7 +586,7 @@ pub fn svabdlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlt))] @@ -599,7 +599,7 @@ pub fn 
svabdlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlt))] @@ -608,7 +608,7 @@ pub fn svabdlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlt))] @@ -621,7 +621,7 @@ pub fn svabdlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlt))] @@ -630,7 +630,7 @@ pub fn svabdlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sabdlt))] @@ -643,7 +643,7 @@ pub fn svabdlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(sabdlt))] @@ -652,7 +652,7 @@ pub fn svabdlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlt))] @@ -665,7 +665,7 @@ pub fn svabdlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlt))] @@ -674,7 +674,7 @@ pub fn svabdlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlt))] @@ -687,7 +687,7 @@ pub fn svabdlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlt))] @@ -696,7 +696,7 @@ pub fn svabdlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_u64])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlt))] @@ -709,7 +709,7 @@ pub fn svabdlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Absolute difference long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svabdlt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uabdlt))] @@ -718,7 +718,7 @@ pub fn svabdlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sadalp))] @@ -731,7 +731,7 @@ pub fn svadalp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sadalp))] @@ -740,7 +740,7 @@ pub fn svadalp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sadalp))] @@ -749,7 +749,7 @@ pub fn svadalp_s16_z(pg: svbool_t, op1: svint16_t, op2: svint8_t) -> svint16_t { } #[doc = "Add and 
accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sadalp))] @@ -762,7 +762,7 @@ pub fn svadalp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sadalp))] @@ -771,7 +771,7 @@ pub fn svadalp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sadalp))] @@ -780,7 +780,7 @@ pub fn svadalp_s32_z(pg: svbool_t, op1: svint32_t, op2: svint16_t) -> svint32_t } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sadalp))] @@ -793,7 +793,7 @@ pub fn svadalp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sadalp))] @@ -802,7 +802,7 @@ pub fn svadalp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sadalp))] @@ -811,7 +811,7 @@ pub fn svadalp_s64_z(pg: svbool_t, op1: svint64_t, op2: svint32_t) -> svint64_t } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uadalp))] @@ -824,7 +824,7 @@ pub fn svadalp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_ } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uadalp))] @@ -833,7 +833,7 @@ pub fn svadalp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_ } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uadalp))] @@ -842,7 +842,7 @@ pub fn svadalp_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint8_t) -> svuint16_ } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uadalp))] @@ -855,7 +855,7 @@ pub fn svadalp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32 } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uadalp))] @@ -864,7 +864,7 @@ pub fn svadalp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32 } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uadalp))] @@ -873,7 +873,7 @@ pub fn svadalp_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint16_t) -> svuint32 } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uadalp))] @@ -886,7 +886,7 @@ pub fn svadalp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64 } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uadalp))] @@ -895,7 +895,7 @@ pub fn svadalp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64 } #[doc = "Add and accumulate long pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadalp[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uadalp))] @@ -904,7 +904,7 @@ pub fn svadalp_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint32_t) -> svuint64 } #[doc = "Add with carry long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adclb))] @@ -917,7 +917,7 @@ pub fn svadclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint3 } #[doc = "Add with carry long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adclb))] @@ -926,7 +926,7 @@ pub fn svadclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Add with carry long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adclb))] @@ -939,7 +939,7 @@ pub fn svadclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint6 } #[doc = "Add with carry long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adclb))] @@ -948,7 +948,7 @@ pub fn svadclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Add with carry long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adclt))] @@ -961,7 +961,7 @@ pub fn svadclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint3 } #[doc = "Add with carry long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adclt))] @@ -970,7 +970,7 @@ pub fn svadclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Add with carry long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adclt))] @@ -983,7 +983,7 @@ pub fn svadclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint6 } #[doc = "Add with carry long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svadclt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(adclt))] @@ -992,7 
+992,7 @@ pub fn svadclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1005,7 +1005,7 @@ pub fn svaddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1014,7 +1014,7 @@ pub fn svaddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1027,7 +1027,7 @@ pub fn svaddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1036,7 +1036,7 @@ pub fn svaddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1049,7 +1049,7 @@ pub fn svaddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1058,7 +1058,7 @@ pub fn svaddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1067,7 +1067,7 @@ pub fn svaddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1076,7 +1076,7 @@ pub fn svaddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1085,7 +1085,7 @@ pub fn svaddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1094,7 +1094,7 @@ pub fn svaddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1103,7 +1103,7 @@ pub fn svaddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { } #[doc = "Add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnb))] @@ -1112,7 +1112,7 @@ pub fn svaddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1125,7 +1125,7 @@ pub fn svaddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1134,7 +1134,7 @@ pub fn 
svaddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1147,7 +1147,7 @@ pub fn svaddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_ } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1156,7 +1156,7 @@ pub fn svaddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1169,7 +1169,7 @@ pub fn svaddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_ } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1178,7 +1178,7 @@ pub fn svaddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u16])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1187,7 +1187,7 @@ pub fn svaddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1196,7 +1196,7 @@ pub fn svaddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1205,7 +1205,7 @@ pub fn svaddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuin } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1214,7 +1214,7 @@ pub fn svaddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t } #[doc = "Add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1223,7 +1223,7 @@ pub fn svaddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuin } #[doc = "Add narrow 
high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddhnt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addhnt))] @@ -1232,7 +1232,7 @@ pub fn svaddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlb))] @@ -1245,7 +1245,7 @@ pub fn svaddlb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlb))] @@ -1254,7 +1254,7 @@ pub fn svaddlb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlb))] @@ -1267,7 +1267,7 @@ pub fn svaddlb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlb))] @@ -1276,7 +1276,7 @@ pub fn svaddlb_n_s32(op1: 
svint16_t, op2: i16) -> svint32_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlb))] @@ -1289,7 +1289,7 @@ pub fn svaddlb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlb))] @@ -1298,7 +1298,7 @@ pub fn svaddlb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlb))] @@ -1311,7 +1311,7 @@ pub fn svaddlb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlb))] @@ -1320,7 +1320,7 @@ pub fn svaddlb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlb))] @@ -1333,7 +1333,7 
@@ pub fn svaddlb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlb))] @@ -1342,7 +1342,7 @@ pub fn svaddlb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlb))] @@ -1355,7 +1355,7 @@ pub fn svaddlb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlb))] @@ -1364,7 +1364,7 @@ pub fn svaddlb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Add long (bottom + top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlbt))] @@ -1380,7 +1380,7 @@ pub fn svaddlbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Add long (bottom + top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(saddlbt))] @@ -1389,7 +1389,7 @@ pub fn svaddlbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Add long (bottom + top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlbt))] @@ -1405,7 +1405,7 @@ pub fn svaddlbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Add long (bottom + top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlbt))] @@ -1414,7 +1414,7 @@ pub fn svaddlbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Add long (bottom + top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlbt))] @@ -1430,7 +1430,7 @@ pub fn svaddlbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Add long (bottom + top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlbt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlbt))] @@ -1439,7 +1439,7 @@ pub fn svaddlbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlt))] @@ -1452,7 +1452,7 @@ pub fn svaddlt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlt))] @@ -1461,7 +1461,7 @@ pub fn svaddlt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlt))] @@ -1474,7 +1474,7 @@ pub fn svaddlt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlt))] @@ -1483,7 +1483,7 @@ pub fn svaddlt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlt))] @@ -1496,7 +1496,7 @@ pub fn svaddlt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable 
= "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddlt))] @@ -1505,7 +1505,7 @@ pub fn svaddlt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlt))] @@ -1518,7 +1518,7 @@ pub fn svaddlt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlt))] @@ -1527,7 +1527,7 @@ pub fn svaddlt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlt))] @@ -1540,7 +1540,7 @@ pub fn svaddlt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlt))] @@ -1549,7 +1549,7 @@ pub fn svaddlt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_u64])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlt))] @@ -1562,7 +1562,7 @@ pub fn svaddlt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddlt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddlt))] @@ -1571,7 +1571,7 @@ pub fn svaddlt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(faddp))] @@ -1584,7 +1584,7 @@ pub fn svaddp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(faddp))] @@ -1593,7 +1593,7 @@ pub fn svaddp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(faddp))] @@ -1606,7 +1606,7 @@ pub fn svaddp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Add pairwise"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(faddp))] @@ -1615,7 +1615,7 @@ pub fn svaddp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1628,7 +1628,7 @@ pub fn svaddp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1637,7 +1637,7 @@ pub fn svaddp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1650,7 +1650,7 @@ pub fn svaddp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1659,7 +1659,7 @@ pub fn svaddp_s16_x(pg: svbool_t, op1: svint16_t, 
op2: svint16_t) -> svint16_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1672,7 +1672,7 @@ pub fn svaddp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1681,7 +1681,7 @@ pub fn svaddp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1694,7 +1694,7 @@ pub fn svaddp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1703,7 +1703,7 @@ pub fn svaddp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(addp))] @@ -1712,7 +1712,7 @@ pub fn svaddp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1721,7 +1721,7 @@ pub fn svaddp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1730,7 +1730,7 @@ pub fn svaddp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1739,7 +1739,7 @@ pub fn svaddp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1748,7 +1748,7 @@ pub fn svaddp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1757,7 +1757,7 @@ pub fn svaddp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1766,7 +1766,7 @@ pub fn svaddp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddp[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(addp))] @@ -1775,7 +1775,7 @@ pub fn svaddp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwb))] @@ -1788,7 +1788,7 @@ pub fn svaddwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwb))] @@ -1797,7 +1797,7 @@ pub fn svaddwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s32])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwb))] @@ -1810,7 +1810,7 @@ pub fn svaddwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwb))] @@ -1819,7 +1819,7 @@ pub fn svaddwb_n_s32(op1: svint32_t, op2: i16) -> svint32_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwb))] @@ -1832,7 +1832,7 @@ pub fn svaddwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwb))] @@ -1841,7 +1841,7 @@ pub fn svaddwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwb))] @@ -1854,7 +1854,7 @@ pub fn svaddwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwb))] @@ -1863,7 +1863,7 @@ pub fn svaddwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwb))] @@ -1876,7 +1876,7 @@ pub fn svaddwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwb))] @@ -1885,7 +1885,7 @@ pub fn svaddwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwb))] @@ -1898,7 +1898,7 @@ pub fn svaddwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Add wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwb))] @@ -1907,7 +1907,7 @@ pub fn svaddwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { } 
#[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwt))] @@ -1920,7 +1920,7 @@ pub fn svaddwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwt))] @@ -1929,7 +1929,7 @@ pub fn svaddwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwt))] @@ -1942,7 +1942,7 @@ pub fn svaddwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwt))] @@ -1951,7 +1951,7 @@ pub fn svaddwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwt))] @@ -1964,7 +1964,7 @@ pub fn svaddwt_s64(op1: svint64_t, op2: svint32_t) -> 
svint64_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(saddwt))] @@ -1973,7 +1973,7 @@ pub fn svaddwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwt))] @@ -1986,7 +1986,7 @@ pub fn svaddwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwt))] @@ -1995,7 +1995,7 @@ pub fn svaddwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwt))] @@ -2008,7 +2008,7 @@ pub fn svaddwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwt))] @@ -2017,7 +2017,7 @@ pub fn svaddwt_n_u32(op1: 
svuint32_t, op2: u16) -> svuint32_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwt))] @@ -2030,7 +2030,7 @@ pub fn svaddwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Add wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaddwt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uaddwt))] @@ -2039,7 +2039,7 @@ pub fn svaddwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { } #[doc = "AES single round decryption"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesd[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(aesd))] @@ -2052,7 +2052,7 @@ pub fn svaesd_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "AES single round encryption"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaese[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(aese))] @@ -2065,7 +2065,7 @@ pub fn svaese_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "AES inverse mix columns"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesimc[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(aesimc))] @@ -2078,7 +2078,7 @@ pub fn svaesimc_u8(op: svuint8_t) -> svuint8_t { } #[doc = "AES mix columns"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svaesmc[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(aesmc))] @@ -2091,7 +2091,7 @@ pub fn svaesmc_u8(op: svuint8_t) -> svuint8_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2104,7 +2104,7 @@ pub fn svbcax_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2113,7 +2113,7 @@ pub fn svbcax_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2126,7 +2126,7 @@ pub fn svbcax_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2135,7 +2135,7 @@ pub fn svbcax_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2148,7 +2148,7 @@ pub fn svbcax_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2157,7 +2157,7 @@ pub fn svbcax_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2170,7 +2170,7 @@ pub fn svbcax_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2179,7 +2179,7 @@ pub fn svbcax_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2188,7 +2188,7 @@ pub fn svbcax_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2197,7 +2197,7 @@ pub fn svbcax_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2206,7 +2206,7 @@ pub fn svbcax_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16 } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2215,7 +2215,7 @@ pub fn svbcax_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ 
-2224,7 +2224,7 @@ pub fn svbcax_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32 } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2233,7 +2233,7 @@ pub fn svbcax_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2242,7 +2242,7 @@ pub fn svbcax_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64 } #[doc = "Bitwise clear and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbcax[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bcax))] @@ -2251,7 +2251,7 @@ pub fn svbcax_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Scatter lower bits into positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bdep))] @@ -2264,7 +2264,7 @@ pub fn svbdep_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Scatter lower bits into positions selected by bitmask"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bdep))] @@ -2273,7 +2273,7 @@ pub fn svbdep_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Scatter lower bits into positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bdep))] @@ -2286,7 +2286,7 @@ pub fn svbdep_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Scatter lower bits into positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bdep))] @@ -2295,7 +2295,7 @@ pub fn svbdep_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Scatter lower bits into positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bdep))] @@ -2308,7 +2308,7 @@ pub fn svbdep_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Scatter lower bits into positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bdep))] @@ -2317,7 +2317,7 @@ pub fn svbdep_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Scatter lower bits into positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bdep))] @@ -2330,7 +2330,7 @@ pub fn svbdep_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Scatter lower bits into positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbdep[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bdep))] @@ -2339,7 +2339,7 @@ pub fn svbdep_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Gather lower bits from positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bext))] @@ -2352,7 +2352,7 @@ pub fn svbext_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Gather lower bits from positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bext))] @@ -2361,7 +2361,7 @@ pub fn svbext_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = 
"Gather lower bits from positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bext))] @@ -2374,7 +2374,7 @@ pub fn svbext_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Gather lower bits from positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bext))] @@ -2383,7 +2383,7 @@ pub fn svbext_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Gather lower bits from positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bext))] @@ -2396,7 +2396,7 @@ pub fn svbext_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Gather lower bits from positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bext))] @@ -2405,7 +2405,7 @@ pub fn svbext_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Gather lower bits from positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_u64])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bext))] @@ -2418,7 +2418,7 @@ pub fn svbext_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Gather lower bits from positions selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbext[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bext))] @@ -2427,7 +2427,7 @@ pub fn svbext_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Group bits to right or left as selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bgrp))] @@ -2440,7 +2440,7 @@ pub fn svbgrp_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Group bits to right or left as selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bgrp))] @@ -2449,7 +2449,7 @@ pub fn svbgrp_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Group bits to right or left as selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bgrp))] @@ -2462,7 +2462,7 @@ pub fn 
svbgrp_u16(op1: svuint16_t, op2: svuint16_t) -> svuint16_t { } #[doc = "Group bits to right or left as selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bgrp))] @@ -2471,7 +2471,7 @@ pub fn svbgrp_n_u16(op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Group bits to right or left as selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bgrp))] @@ -2484,7 +2484,7 @@ pub fn svbgrp_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Group bits to right or left as selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bgrp))] @@ -2493,7 +2493,7 @@ pub fn svbgrp_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Group bits to right or left as selected by bitmask"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bgrp))] @@ -2506,7 +2506,7 @@ pub fn svbgrp_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Group bits to right or left as selected by bitmask"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbgrp[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-bitperm")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bgrp))] @@ -2515,7 +2515,7 @@ pub fn svbgrp_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2528,7 +2528,7 @@ pub fn svbsl1n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2537,7 +2537,7 @@ pub fn svbsl1n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2550,7 +2550,7 @@ pub fn svbsl1n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(bsl1n))] @@ -2559,7 +2559,7 @@ pub fn svbsl1n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2572,7 +2572,7 @@ pub fn svbsl1n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2581,7 +2581,7 @@ pub fn svbsl1n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2594,7 +2594,7 @@ pub fn svbsl1n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2603,7 +2603,7 @@ pub fn svbsl1n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2612,7 +2612,7 @@ pub fn svbsl1n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2621,7 +2621,7 @@ pub fn svbsl1n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2630,7 +2630,7 @@ pub fn svbsl1n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint1 } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2639,7 +2639,7 @@ pub fn svbsl1n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2648,7 +2648,7 @@ pub fn svbsl1n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint3 } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2657,7 +2657,7 @@ pub fn svbsl1n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2666,7 +2666,7 @@ pub fn svbsl1n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint6 } #[doc = "Bitwise select with first input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl1n[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl1n))] @@ -2675,7 +2675,7 @@ pub fn svbsl1n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2688,7 +2688,7 @@ pub fn svbsl2n_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2697,7 +2697,7 @@ pub fn svbsl2n_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2710,7 +2710,7 @@ pub fn svbsl2n_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2719,7 +2719,7 @@ pub fn svbsl2n_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2732,7 +2732,7 @@ pub fn svbsl2n_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2741,7 +2741,7 @@ pub fn svbsl2n_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2754,7 +2754,7 @@ pub fn svbsl2n_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2763,7 +2763,7 @@ pub fn svbsl2n_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2772,7 +2772,7 @@ pub fn svbsl2n_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2781,7 +2781,7 @@ pub fn svbsl2n_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2790,7 +2790,7 @@ pub fn svbsl2n_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint1 } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2799,7 +2799,7 @@ pub fn svbsl2n_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2808,7 +2808,7 @@ pub fn svbsl2n_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint3 } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2817,7 +2817,7 @@ pub fn svbsl2n_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2826,7 +2826,7 @@ pub fn svbsl2n_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint6 } #[doc = "Bitwise select with second input inverted"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl2n[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl2n))] @@ -2835,7 +2835,7 @@ pub fn svbsl2n_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2848,7 +2848,7 @@ pub fn svbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2857,7 +2857,7 @@ pub fn svbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2870,7 +2870,7 @@ pub fn svbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s16])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2879,7 +2879,7 @@ pub fn svbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2892,7 +2892,7 @@ pub fn svbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2901,7 +2901,7 @@ pub fn svbsl_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2914,7 +2914,7 @@ pub fn svbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2923,7 +2923,7 @@ pub fn svbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { } #[doc = "Bitwise select"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2932,7 +2932,7 @@ pub fn svbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2941,7 +2941,7 @@ pub fn svbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2950,7 +2950,7 @@ pub fn svbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16_ } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2959,7 +2959,7 @@ pub fn svbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2968,7 +2968,7 @@ pub fn svbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: 
svuint32_t) -> svuint32_ } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2977,7 +2977,7 @@ pub fn svbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2986,7 +2986,7 @@ pub fn svbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64_ } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svbsl[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(bsl))] @@ -2995,7 +2995,7 @@ pub fn svbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] @@ -3009,7 +3009,7 @@ pub fn svcadd_s8(op1: svint8_t, op2: svint8_t) -> svint } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(cadd, IMM_ROTATION = 90))] @@ -3023,7 +3023,7 @@ pub fn svcadd_s16(op1: svint16_t, op2: svint16_t) -> sv } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] @@ -3037,7 +3037,7 @@ pub fn svcadd_s32(op1: svint32_t, op2: svint32_t) -> sv } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] @@ -3051,7 +3051,7 @@ pub fn svcadd_s64(op1: svint64_t, op2: svint64_t) -> sv } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] @@ -3061,7 +3061,7 @@ pub fn svcadd_u8(op1: svuint8_t, op2: svuint8_t) -> svu } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] @@ -3071,7 +3071,7 @@ pub fn svcadd_u16(op1: svuint16_t, op2: svuint16_t) -> } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] @@ -3081,7 +3081,7 @@ pub fn svcadd_u32(op1: svuint32_t, op2: svuint32_t) -> } #[doc = "Complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcadd[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cadd, IMM_ROTATION = 90))] @@ -3091,7 +3091,7 @@ pub fn svcadd_u64(op1: svuint64_t, op2: svuint64_t) -> } #[doc = "Complex dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))] @@ -3121,7 +3121,7 @@ pub fn svcdot_lane_s32( } #[doc = "Complex dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cdot, IMM_INDEX = 0, IMM_ROTATION = 90))] @@ -3151,7 +3151,7 @@ pub fn svcdot_lane_s64( } #[doc = "Complex dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))] @@ -3176,7 +3176,7 @@ pub fn svcdot_s32( } #[doc = "Complex dot product"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcdot[_s64])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cdot, IMM_ROTATION = 90))] @@ -3201,7 +3201,7 @@ pub fn svcdot_s64( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] @@ -3231,7 +3231,7 @@ pub fn svcmla_lane_s16( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] @@ -3261,7 +3261,7 @@ pub fn svcmla_lane_s32( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] @@ -3285,7 +3285,7 @@ pub fn svcmla_lane_u16( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_INDEX = 0, IMM_ROTATION = 90))] @@ -3309,7 +3309,7 @@ pub fn svcmla_lane_u32( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] @@ -3325,7 +3325,7 @@ pub fn svcmla_s8(op1: svint8_t, op2: svint8_t, op3: svi } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] @@ -3350,7 +3350,7 @@ pub fn svcmla_s16( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] @@ -3375,7 +3375,7 @@ pub fn svcmla_s32( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] @@ -3400,7 +3400,7 @@ pub fn svcmla_s64( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] @@ -3418,7 +3418,7 @@ pub fn svcmla_u8( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] @@ -3436,7 +3436,7 @@ pub fn svcmla_u16( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] @@ -3454,7 +3454,7 @@ pub fn svcmla_u32( } #[doc = "Complex multiply-add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcmla[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(cmla, IMM_ROTATION = 90))] @@ -3472,7 +3472,7 @@ pub fn svcmla_u64( } #[doc = "Up convert long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtlt))] @@ -3486,7 +3486,7 @@ pub fn svcvtlt_f64_f32_m(inactive: svfloat64_t, pg: svbool_t, op: svfloat32_t) - } #[doc = "Up convert long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtlt_f64[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtlt))] @@ -3495,7 +3495,7 @@ pub fn svcvtlt_f64_f32_x(pg: svbool_t, op: svfloat32_t) -> svfloat64_t { } #[doc = "Down convert and 
narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtnt))] @@ -3508,7 +3508,7 @@ pub fn svcvtnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Down convert and narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtnt_f32[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtnt))] @@ -3517,7 +3517,7 @@ pub fn svcvtnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> sv } #[doc = "Down convert, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtx))] @@ -3530,7 +3530,7 @@ pub fn svcvtx_f32_f64_m(inactive: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> } #[doc = "Down convert, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtx))] @@ -3539,7 +3539,7 @@ pub fn svcvtx_f32_f64_x(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { } #[doc = "Down convert, rounding to odd"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtx_f32[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(fcvtx))] @@ -3548,7 +3548,7 @@ pub fn svcvtx_f32_f64_z(pg: svbool_t, op: svfloat64_t) -> svfloat32_t { } #[doc = "Down convert, rounding to odd (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtxnt))] @@ -3561,7 +3561,7 @@ pub fn svcvtxnt_f32_f64_m(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> s } #[doc = "Down convert, rounding to odd (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svcvtxnt_f32[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fcvtxnt))] @@ -3570,7 +3570,7 @@ pub fn svcvtxnt_f32_f64_x(even: svfloat32_t, pg: svbool_t, op: svfloat64_t) -> s } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3583,7 +3583,7 @@ pub fn sveor3_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3592,7 +3592,7 @@ pub fn sveor3_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3605,7 +3605,7 @@ pub fn sveor3_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3614,7 +3614,7 @@ pub fn sveor3_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3627,7 +3627,7 @@ pub fn sveor3_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3636,7 +3636,7 @@ pub fn sveor3_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(eor3))] @@ -3649,7 +3649,7 @@ pub fn sveor3_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3658,7 +3658,7 @@ pub fn sveor3_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3667,7 +3667,7 @@ pub fn sveor3_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3676,7 +3676,7 @@ pub fn sveor3_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3685,7 +3685,7 @@ pub fn sveor3_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16 } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3694,7 +3694,7 @@ pub fn sveor3_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3703,7 +3703,7 @@ pub fn sveor3_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32 } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3712,7 +3712,7 @@ pub fn sveor3_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eor3))] @@ -3721,7 +3721,7 @@ pub fn sveor3_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64 } #[doc = "Bitwise exclusive OR of three vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveor3[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(eor3))] @@ -3730,7 +3730,7 @@ pub fn sveor3_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3743,7 +3743,7 @@ pub fn sveorbt_s8(odd: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3752,7 +3752,7 @@ pub fn sveorbt_n_s8(odd: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3765,7 +3765,7 @@ pub fn sveorbt_s16(odd: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3774,7 +3774,7 @@ pub fn sveorbt_n_s16(odd: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3787,7 +3787,7 @@ pub fn sveorbt_s32(odd: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3796,7 +3796,7 @@ pub fn sveorbt_n_s32(odd: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3809,7 +3809,7 @@ pub fn sveorbt_s64(odd: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3818,7 +3818,7 @@ pub fn sveorbt_n_s64(odd: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(eorbt))] @@ -3827,7 +3827,7 @@ pub fn sveorbt_u8(odd: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3836,7 +3836,7 @@ pub fn sveorbt_n_u8(odd: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3845,7 +3845,7 @@ pub fn sveorbt_u16(odd: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint1 } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3854,7 +3854,7 @@ pub fn sveorbt_n_u16(odd: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3863,7 +3863,7 @@ pub fn sveorbt_u32(odd: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint3 } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3872,7 +3872,7 @@ pub fn sveorbt_n_u32(odd: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3881,7 +3881,7 @@ pub fn sveorbt_u64(odd: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint6 } #[doc = "Interleaving exclusive OR (bottom, top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveorbt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eorbt))] @@ -3890,7 +3890,7 @@ pub fn sveorbt_n_u64(odd: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3903,7 +3903,7 @@ pub fn sveortb_s8(even: svint8_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3912,7 +3912,7 @@ pub fn sveortb_n_s8(even: svint8_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3925,7 +3925,7 @@ pub fn sveortb_s16(even: svint16_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3934,7 +3934,7 @@ pub fn sveortb_n_s16(even: svint16_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3947,7 +3947,7 @@ pub fn sveortb_s32(even: svint32_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3956,7 +3956,7 @@ pub fn sveortb_n_s32(even: svint32_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3969,7 +3969,7 @@ pub fn sveortb_s64(even: svint64_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3978,7 +3978,7 @@ pub fn sveortb_n_s64(even: svint64_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3987,7 +3987,7 @@ pub fn sveortb_u8(even: svuint8_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -3996,7 +3996,7 @@ pub fn sveortb_n_u8(even: svuint8_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(eortb))] @@ -4005,7 +4005,7 @@ pub fn sveortb_u16(even: svuint16_t, op1: svuint16_t, op2: svuint16_t) -> svuint } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -4014,7 +4014,7 @@ pub fn sveortb_n_u16(even: svuint16_t, op1: svuint16_t, op2: u16) -> svuint16_t } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -4023,7 +4023,7 @@ pub fn sveortb_u32(even: svuint32_t, op1: svuint32_t, op2: svuint32_t) -> svuint } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -4032,7 +4032,7 @@ pub fn sveortb_n_u32(even: svuint32_t, op1: svuint32_t, op2: u32) -> svuint32_t } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -4041,7 +4041,7 @@ pub fn sveortb_u64(even: svuint64_t, op1: svuint64_t, op2: svuint64_t) -> svuint } #[doc = "Interleaving exclusive OR (top, bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/sveortb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(eortb))] @@ -4050,7 +4050,7 @@ pub fn sveortb_n_u64(even: svuint64_t, op1: svuint64_t, op2: u64) -> svuint64_t } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4063,7 +4063,7 @@ pub fn svhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4072,7 +4072,7 @@ pub fn svhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4081,7 +4081,7 @@ pub fn svhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4090,7 +4090,7 @@ pub fn svhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: 
i8) -> svint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4099,7 +4099,7 @@ pub fn svhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4108,7 +4108,7 @@ pub fn svhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4121,7 +4121,7 @@ pub fn svhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4130,7 +4130,7 @@ pub fn svhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4139,7 +4139,7 
@@ pub fn svhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4148,7 +4148,7 @@ pub fn svhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4157,7 +4157,7 @@ pub fn svhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4166,7 +4166,7 @@ pub fn svhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4179,7 +4179,7 @@ pub fn svhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4188,7 +4188,7 @@ pub fn svhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4197,7 +4197,7 @@ pub fn svhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4206,7 +4206,7 @@ pub fn svhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4215,7 +4215,7 @@ pub fn svhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4224,7 +4224,7 @@ pub fn svhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_m)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4237,7 +4237,7 @@ pub fn svhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4246,7 +4246,7 @@ pub fn svhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4255,7 +4255,7 @@ pub fn svhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4264,7 +4264,7 @@ pub fn svhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4273,7 +4273,7 @@ pub fn svhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Halving add"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shadd))] @@ -4282,7 +4282,7 @@ pub fn svhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4295,7 +4295,7 @@ pub fn svhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4304,7 +4304,7 @@ pub fn svhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4313,7 +4313,7 @@ pub fn svhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4322,7 +4322,7 @@ pub fn svhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, 
op2: u8) -> svuint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4331,7 +4331,7 @@ pub fn svhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4340,7 +4340,7 @@ pub fn svhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4353,7 +4353,7 @@ pub fn svhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4362,7 +4362,7 @@ pub fn svhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ 
-4371,7 +4371,7 @@ pub fn svhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4380,7 +4380,7 @@ pub fn svhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4389,7 +4389,7 @@ pub fn svhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4398,7 +4398,7 @@ pub fn svhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4411,7 +4411,7 @@ pub fn svhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4420,7 +4420,7 @@ pub fn svhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4429,7 +4429,7 @@ pub fn svhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4438,7 +4438,7 @@ pub fn svhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4447,7 +4447,7 @@ pub fn svhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4456,7 +4456,7 @@ pub fn svhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_m)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4469,7 +4469,7 @@ pub fn svhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4478,7 +4478,7 @@ pub fn svhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4487,7 +4487,7 @@ pub fn svhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4496,7 +4496,7 @@ pub fn svhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4505,7 +4505,7 @@ pub fn svhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Halving add"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhadd[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhadd))] @@ -4514,7 +4514,7 @@ pub fn svhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Count matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(histcnt))] @@ -4530,7 +4530,7 @@ pub fn svhistcnt_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svuint32 } #[doc = "Count matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(histcnt))] @@ -4546,7 +4546,7 @@ pub fn svhistcnt_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svuint64 } #[doc = "Count matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(histcnt))] @@ -4555,7 +4555,7 @@ pub fn svhistcnt_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint } #[doc = "Count matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistcnt[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(histcnt))] @@ -4564,7 +4564,7 @@ pub fn svhistcnt_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint } #[doc = "Count matching elements in 128-bit segments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(histseg))] @@ -4580,7 +4580,7 @@ pub fn svhistseg_s8(op1: svint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Count matching elements in 128-bit segments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhistseg[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(histseg))] @@ -4589,7 +4589,7 @@ pub fn svhistseg_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4602,7 +4602,7 @@ pub fn svhsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4611,7 +4611,7 @@ pub fn svhsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_x)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4620,7 +4620,7 @@ pub fn svhsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4629,7 +4629,7 @@ pub fn svhsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4638,7 +4638,7 @@ pub fn svhsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4647,7 +4647,7 @@ pub fn svhsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4660,7 +4660,7 @@ pub fn svhsub_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Halving subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4669,7 +4669,7 @@ pub fn svhsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4678,7 +4678,7 @@ pub fn svhsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4687,7 +4687,7 @@ pub fn svhsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4696,7 +4696,7 @@ pub fn svhsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4705,7 +4705,7 @@ pub fn 
svhsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4718,7 +4718,7 @@ pub fn svhsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4727,7 +4727,7 @@ pub fn svhsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4736,7 +4736,7 @@ pub fn svhsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4745,7 +4745,7 @@ pub fn svhsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4754,7 +4754,7 @@ pub fn svhsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4763,7 +4763,7 @@ pub fn svhsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4776,7 +4776,7 @@ pub fn svhsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4785,7 +4785,7 @@ pub fn svhsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4794,7 +4794,7 @@ pub fn svhsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Halving subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4803,7 +4803,7 @@ pub fn svhsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4812,7 +4812,7 @@ pub fn svhsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -4821,7 +4821,7 @@ pub fn svhsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4834,7 +4834,7 @@ pub fn svhsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4843,7 +4843,7 @@ pub fn svhsub_n_u8_m(pg: 
svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4852,7 +4852,7 @@ pub fn svhsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4861,7 +4861,7 @@ pub fn svhsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4870,7 +4870,7 @@ pub fn svhsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4879,7 +4879,7 @@ pub fn svhsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(uhsub))] @@ -4892,7 +4892,7 @@ pub fn svhsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4901,7 +4901,7 @@ pub fn svhsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4910,7 +4910,7 @@ pub fn svhsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4919,7 +4919,7 @@ pub fn svhsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4928,7 +4928,7 @@ pub fn svhsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u16]_z)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4937,7 +4937,7 @@ pub fn svhsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4950,7 +4950,7 @@ pub fn svhsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4959,7 +4959,7 @@ pub fn svhsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4968,7 +4968,7 @@ pub fn svhsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4977,7 +4977,7 @@ pub fn svhsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Halving subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4986,7 +4986,7 @@ pub fn svhsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -4995,7 +4995,7 @@ pub fn svhsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5008,7 +5008,7 @@ pub fn svhsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5017,7 +5017,7 @@ pub fn svhsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5026,7 +5026,7 @@ pub fn 
svhsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5035,7 +5035,7 @@ pub fn svhsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5044,7 +5044,7 @@ pub fn svhsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsub[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5053,7 +5053,7 @@ pub fn svhsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5066,7 +5066,7 @@ pub fn svhsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5075,7 +5075,7 @@ pub fn svhsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5084,7 +5084,7 @@ pub fn svhsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5093,7 +5093,7 @@ pub fn svhsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5102,7 +5102,7 @@ pub fn svhsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5111,7 +5111,7 @@ pub fn svhsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5124,7 +5124,7 @@ pub fn svhsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5133,7 +5133,7 @@ pub fn svhsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5142,7 +5142,7 @@ pub fn svhsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5151,7 +5151,7 @@ pub fn svhsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ 
-5160,7 +5160,7 @@ pub fn svhsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5169,7 +5169,7 @@ pub fn svhsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5182,7 +5182,7 @@ pub fn svhsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5191,7 +5191,7 @@ pub fn svhsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5200,7 +5200,7 @@ pub fn svhsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_x)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5209,7 +5209,7 @@ pub fn svhsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5218,7 +5218,7 @@ pub fn svhsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5227,7 +5227,7 @@ pub fn svhsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5240,7 +5240,7 @@ pub fn svhsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5249,7 +5249,7 @@ pub fn svhsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Halving subtract 
reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5258,7 +5258,7 @@ pub fn svhsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5267,7 +5267,7 @@ pub fn svhsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5276,7 +5276,7 @@ pub fn svhsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shsub))] @@ -5285,7 +5285,7 @@ pub fn svhsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uhsub))] @@ -5298,7 +5298,7 @@ pub fn svhsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5307,7 +5307,7 @@ pub fn svhsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5316,7 +5316,7 @@ pub fn svhsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5325,7 +5325,7 @@ pub fn svhsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5334,7 +5334,7 @@ pub fn svhsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u8]_z)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5343,7 +5343,7 @@ pub fn svhsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5356,7 +5356,7 @@ pub fn svhsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16 } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5365,7 +5365,7 @@ pub fn svhsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5374,7 +5374,7 @@ pub fn svhsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16 } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5383,7 +5383,7 @@ pub fn svhsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } 
#[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5392,7 +5392,7 @@ pub fn svhsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16 } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5401,7 +5401,7 @@ pub fn svhsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5414,7 +5414,7 @@ pub fn svhsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32 } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5423,7 +5423,7 @@ pub fn svhsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5432,7 +5432,7 @@ pub fn svhsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32 } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5441,7 +5441,7 @@ pub fn svhsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5450,7 +5450,7 @@ pub fn svhsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32 } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5459,7 +5459,7 @@ pub fn svhsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5472,7 +5472,7 @@ pub fn svhsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64 } #[doc = "Halving subtract reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5481,7 +5481,7 @@ pub fn svhsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5490,7 +5490,7 @@ pub fn svhsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64 } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5499,7 +5499,7 @@ pub fn svhsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uhsub))] @@ -5508,7 +5508,7 @@ pub fn svhsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64 } #[doc = "Halving subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svhsubr[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uhsub))] @@ -5521,7 +5521,7 @@ pub fn svhsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5549,7 +5549,7 @@ pub unsafe fn svldnt1_gather_s64index_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5577,7 +5577,7 @@ pub unsafe fn svldnt1_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and 
[explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5594,7 +5594,7 @@ pub unsafe fn svldnt1_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5611,7 +5611,7 @@ pub unsafe fn svldnt1_gather_u64index_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5628,7 +5628,7 @@ pub unsafe fn svldnt1_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety 
constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5645,7 +5645,7 @@ pub unsafe fn svldnt1_gather_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5673,7 +5673,7 @@ pub unsafe fn svldnt1_gather_s64offset_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5701,7 +5701,7 @@ pub unsafe fn svldnt1_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5718,7 +5718,7 @@ pub unsafe fn svldnt1_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -5746,7 +5746,7 @@ pub unsafe fn svldnt1_gather_u32offset_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each 
active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -5774,7 +5774,7 @@ pub unsafe fn svldnt1_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -5791,7 +5791,7 @@ pub unsafe fn svldnt1_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5808,7 
+5808,7 @@ pub unsafe fn svldnt1_gather_u64offset_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5825,7 +5825,7 @@ pub unsafe fn svldnt1_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5843,7 +5843,7 @@ pub unsafe fn svldnt1_gather_u64offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -5857,7 +5857,7 @@ pub unsafe fn svldnt1_gather_u32base_f32(pg: svbool_t, bases: svuint32_t) -> svf #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -5871,7 +5871,7 @@ pub unsafe fn svldnt1_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> svi #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -5885,7 +5885,7 @@ pub unsafe fn 
svldnt1_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> svu #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5899,7 +5899,7 @@ pub unsafe fn svldnt1_gather_u64base_f64(pg: svbool_t, bases: svuint64_t) -> svf #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5913,7 +5913,7 @@ pub unsafe fn svldnt1_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> svi #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using 
it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5927,7 +5927,7 @@ pub unsafe fn svldnt1_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> svu #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -5945,7 +5945,7 @@ pub unsafe fn svldnt1_gather_u32base_index_f32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -5963,7 +5963,7 @@ pub unsafe fn svldnt1_gather_u32base_index_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -5981,7 +5981,7 @@ pub unsafe fn svldnt1_gather_u32base_index_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -5999,7 +5999,7 @@ pub unsafe fn svldnt1_gather_u64base_index_f64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc 
= " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -6017,7 +6017,7 @@ pub unsafe fn svldnt1_gather_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -6035,7 +6035,7 @@ pub unsafe fn svldnt1_gather_u64base_index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ 
-6064,7 +6064,7 @@ pub unsafe fn svldnt1_gather_u32base_offset_f32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -6093,7 +6093,7 @@ pub unsafe fn svldnt1_gather_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -6111,7 +6111,7 @@ pub unsafe fn svldnt1_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory 
ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -6140,7 +6140,7 @@ pub unsafe fn svldnt1_gather_u64base_offset_f64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -6169,7 +6169,7 @@ pub unsafe fn svldnt1_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1d))] @@ -6186,7 +6186,7 @@ pub unsafe fn 
svldnt1_gather_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6218,7 +6218,7 @@ pub unsafe fn svldnt1sb_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6250,7 +6250,7 @@ pub unsafe fn svldnt1sh_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -6282,7 +6282,7 @@ pub unsafe fn svldnt1sw_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6299,7 +6299,7 @@ pub unsafe fn svldnt1sb_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6316,7 +6316,7 @@ pub unsafe fn svldnt1sh_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the 
address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -6333,7 +6333,7 @@ pub unsafe fn svldnt1sw_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6365,7 +6365,7 @@ pub unsafe fn svldnt1sb_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6397,7 +6397,7 @@ pub unsafe fn svldnt1sh_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6414,7 +6414,7 @@ pub unsafe fn svldnt1sb_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6431,7 +6431,7 @@ pub unsafe fn svldnt1sh_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6448,7 +6448,7 @@ pub unsafe fn svldnt1sb_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6465,7 +6465,7 @@ pub unsafe fn svldnt1sh_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -6482,7 +6482,7 @@ 
pub unsafe fn svldnt1sw_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6499,7 +6499,7 @@ pub unsafe fn svldnt1sb_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6516,7 +6516,7 @@ pub unsafe fn svldnt1sh_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -6534,7 +6534,7 @@ pub unsafe fn svldnt1sw_gather_u64offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6567,7 +6567,7 @@ pub unsafe fn svldnt1sb_gather_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6600,7 +6600,7 @@ pub unsafe fn svldnt1sh_gather_u32base_offset_s32( #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6618,7 +6618,7 @@ pub unsafe fn svldnt1sb_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6636,7 +6636,7 @@ pub unsafe fn svldnt1sh_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6669,7 +6669,7 @@ pub unsafe fn svldnt1sb_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6702,7 +6702,7 @@ pub unsafe fn svldnt1sh_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -6735,7 +6735,7 @@ pub unsafe fn svldnt1sw_gather_u64base_offset_s64( #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6753,7 +6753,7 @@ pub unsafe fn svldnt1sb_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6771,7 +6771,7 @@ pub unsafe fn svldnt1sh_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -6789,7 +6789,7 @@ pub unsafe fn svldnt1sw_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6803,7 +6803,7 @@ pub unsafe fn svldnt1sb_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6817,7 +6817,7 @@ pub unsafe fn svldnt1sh_gather_u32base_s32(pg: 
svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6831,7 +6831,7 @@ pub unsafe fn svldnt1sb_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6845,7 +6845,7 @@ pub unsafe fn svldnt1sh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal 
accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6859,7 +6859,7 @@ pub unsafe fn svldnt1sb_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6873,7 +6873,7 @@ pub unsafe fn svldnt1sh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -6887,7 +6887,7 @@ pub unsafe fn svldnt1sw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sb))] @@ -6901,7 +6901,7 @@ pub unsafe fn svldnt1sb_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6915,7 +6915,7 @@ pub unsafe fn svldnt1sh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a 
`usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -6928,7 +6928,7 @@ pub unsafe fn svldnt1sw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -6956,7 +6956,7 @@ pub unsafe fn svldnt1sh_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -6984,7 +6984,7 @@ pub unsafe fn svldnt1sw_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -7001,7 +7001,7 @@ pub unsafe fn svldnt1sh_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -7018,7 +7018,7 @@ pub unsafe fn svldnt1sw_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special 
memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -7035,7 +7035,7 @@ pub unsafe fn svldnt1sh_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -7052,7 +7052,7 @@ pub unsafe fn svldnt1sw_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -7069,7 +7069,7 @@ pub unsafe fn svldnt1sh_gather_u64index_u64( #[doc = " * 
[`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -7087,7 +7087,7 @@ pub unsafe fn svldnt1sw_gather_u64index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -7105,7 +7105,7 @@ pub unsafe fn svldnt1sh_gather_u32base_index_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -7123,7 +7123,7 @@ pub unsafe fn svldnt1sh_gather_u32base_index_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -7141,7 +7141,7 @@ pub unsafe fn svldnt1sh_gather_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -7159,7 +7159,7 @@ pub unsafe fn svldnt1sw_gather_u64base_index_s64( #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sh))] @@ -7177,7 +7177,7 @@ pub unsafe fn svldnt1sh_gather_u64base_index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1sw))] @@ -7194,7 +7194,7 @@ pub unsafe fn svldnt1sw_gather_u64base_index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7211,7 +7211,7 @@ pub unsafe fn svldnt1ub_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7228,7 +7228,7 @@ pub unsafe fn svldnt1uh_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -7245,7 +7245,7 @@ pub unsafe fn svldnt1uw_gather_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the 
address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7275,7 +7275,7 @@ pub unsafe fn svldnt1ub_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7305,7 +7305,7 @@ pub unsafe fn svldnt1uh_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -7335,7 +7335,7 @@ pub unsafe fn svldnt1uw_gather_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7352,7 +7352,7 @@ pub unsafe fn svldnt1ub_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7369,7 +7369,7 @@ pub unsafe fn svldnt1uh_gather_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed 
by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7400,7 +7400,7 @@ pub unsafe fn svldnt1ub_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7431,7 +7431,7 @@ pub unsafe fn svldnt1uh_gather_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7448,7 +7448,7 @@ pub unsafe fn 
svldnt1ub_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7465,7 +7465,7 @@ pub unsafe fn svldnt1uh_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -7482,7 +7482,7 @@ pub unsafe fn svldnt1uw_gather_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7499,7 +7499,7 @@ pub unsafe fn svldnt1ub_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7516,7 +7516,7 @@ pub unsafe fn svldnt1uh_gather_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -7534,7 +7534,7 @@ pub unsafe fn svldnt1uw_gather_u64offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7552,7 +7552,7 @@ pub unsafe fn svldnt1ub_gather_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7570,7 +7570,7 @@ pub unsafe fn svldnt1uh_gather_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7602,7 +7602,7 @@ pub unsafe fn svldnt1ub_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7634,7 +7634,7 @@ pub unsafe fn svldnt1uh_gather_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7652,7 +7652,7 @@ pub unsafe fn svldnt1ub_gather_u64base_offset_s64( #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7670,7 +7670,7 @@ pub unsafe fn svldnt1uh_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -7688,7 +7688,7 @@ pub unsafe fn svldnt1uw_gather_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7720,7 +7720,7 @@ pub unsafe fn svldnt1ub_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7752,7 +7752,7 @@ pub unsafe fn svldnt1uh_gather_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -7784,7 +7784,7 @@ pub unsafe fn svldnt1uw_gather_u64base_offset_u64( #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7798,7 +7798,7 @@ pub unsafe fn svldnt1ub_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7812,7 +7812,7 @@ pub unsafe fn svldnt1uh_gather_u32base_s32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers 
may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7826,7 +7826,7 @@ pub unsafe fn svldnt1ub_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7840,7 +7840,7 @@ pub unsafe fn svldnt1uh_gather_u32base_u32(pg: svbool_t, bases: svuint32_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7854,7 +7854,7 @@ 
pub unsafe fn svldnt1ub_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7868,7 +7868,7 @@ pub unsafe fn svldnt1uh_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -7882,7 +7882,7 @@ pub unsafe fn svldnt1uw_gather_u64base_s64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each 
lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1b))] @@ -7896,7 +7896,7 @@ pub unsafe fn svldnt1ub_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7910,7 +7910,7 @@ pub unsafe fn svldnt1uh_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -7923,7 +7923,7 @@ pub unsafe fn svldnt1uw_gather_u64base_u64(pg: svbool_t, bases: svuint64_t) -> s #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7940,7 +7940,7 @@ pub unsafe fn svldnt1uh_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -7957,7 +7957,7 @@ pub unsafe fn svldnt1uw_gather_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by 
`pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -7987,7 +7987,7 @@ pub unsafe fn svldnt1uh_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -8017,7 +8017,7 @@ pub unsafe fn svldnt1uw_gather_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -8034,7 +8034,7 @@ pub unsafe fn 
svldnt1uh_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -8051,7 +8051,7 @@ pub unsafe fn svldnt1uw_gather_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -8068,7 +8068,7 @@ pub unsafe fn svldnt1uh_gather_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -8086,7 +8086,7 @@ pub unsafe fn svldnt1uw_gather_u64index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -8104,7 +8104,7 @@ pub unsafe fn svldnt1uh_gather_u32base_index_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -8122,7 +8122,7 @@ pub unsafe fn svldnt1uh_gather_u32base_index_u32( #[doc = " * This dereferences and accesses 
the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -8140,7 +8140,7 @@ pub unsafe fn svldnt1uh_gather_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -8158,7 +8158,7 @@ pub unsafe fn svldnt1uw_gather_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1h))] @@ -8176,7 +8176,7 @@ pub unsafe fn svldnt1uh_gather_u64base_index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ldnt1w))] @@ -8189,7 +8189,7 @@ pub unsafe fn svldnt1uw_gather_u64base_index_u64( } #[doc = "Base 2 logarithm as integer"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(flogb))] @@ -8202,7 +8202,7 @@ pub fn svlogb_f32_m(inactive: svint32_t, pg: svbool_t, op: svfloat32_t) -> svint } #[doc = "Base 2 logarithm as integer"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(flogb))] @@ -8211,7 +8211,7 @@ pub fn svlogb_f32_x(pg: svbool_t, op: 
svfloat32_t) -> svint32_t { } #[doc = "Base 2 logarithm as integer"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(flogb))] @@ -8220,7 +8220,7 @@ pub fn svlogb_f32_z(pg: svbool_t, op: svfloat32_t) -> svint32_t { } #[doc = "Base 2 logarithm as integer"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(flogb))] @@ -8233,7 +8233,7 @@ pub fn svlogb_f64_m(inactive: svint64_t, pg: svbool_t, op: svfloat64_t) -> svint } #[doc = "Base 2 logarithm as integer"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(flogb))] @@ -8242,7 +8242,7 @@ pub fn svlogb_f64_x(pg: svbool_t, op: svfloat64_t) -> svint64_t { } #[doc = "Base 2 logarithm as integer"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svlogb[_f64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(flogb))] @@ -8251,7 +8251,7 @@ pub fn svlogb_f64_z(pg: svbool_t, op: svfloat64_t) -> svint64_t { } #[doc = "Detect any matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(match))] @@ -8264,7 +8264,7 @@ pub fn svmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { } #[doc = "Detect any matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(match))] @@ -8277,7 +8277,7 @@ pub fn svmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { } #[doc = "Detect any matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(match))] @@ -8286,7 +8286,7 @@ pub fn svmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { } #[doc = "Detect any matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmatch[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(match))] @@ -8295,7 +8295,7 @@ pub fn svmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t { } #[doc = "Maximum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnmp))] @@ -8311,7 +8311,7 @@ pub fn svmaxnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svflo } #[doc = "Maximum number pairwise"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnmp))] @@ -8320,7 +8320,7 @@ pub fn svmaxnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svflo } #[doc = "Maximum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnmp))] @@ -8336,7 +8336,7 @@ pub fn svmaxnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svflo } #[doc = "Maximum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxnmp[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxnmp))] @@ -8345,7 +8345,7 @@ pub fn svmaxnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svflo } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxp))] @@ -8358,7 +8358,7 @@ pub fn svmaxp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxp))] @@ -8367,7 
+8367,7 @@ pub fn svmaxp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxp))] @@ -8380,7 +8380,7 @@ pub fn svmaxp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmaxp))] @@ -8389,7 +8389,7 @@ pub fn svmaxp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxp))] @@ -8402,7 +8402,7 @@ pub fn svmaxp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxp))] @@ -8411,7 +8411,7 @@ pub fn svmaxp_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxp))] @@ -8424,7 +8424,7 @@ pub fn svmaxp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxp))] @@ -8433,7 +8433,7 @@ pub fn svmaxp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxp))] @@ -8446,7 +8446,7 @@ pub fn svmaxp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxp))] @@ -8455,7 +8455,7 @@ pub fn svmaxp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxp))] @@ -8468,7 +8468,7 @@ pub fn svmaxp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smaxp))] @@ -8477,7 +8477,7 @@ pub fn svmaxp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxp))] @@ -8490,7 +8490,7 @@ pub fn svmaxp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxp))] @@ -8499,7 +8499,7 @@ pub fn svmaxp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxp))] @@ -8512,7 +8512,7 @@ pub fn svmaxp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxp))] @@ -8521,7 +8521,7 @@ pub fn svmaxp_u16_x(pg: 
svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxp))] @@ -8534,7 +8534,7 @@ pub fn svmaxp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxp))] @@ -8543,7 +8543,7 @@ pub fn svmaxp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxp))] @@ -8556,7 +8556,7 @@ pub fn svmaxp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Maximum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmaxp[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umaxp))] @@ -8565,7 +8565,7 @@ pub fn svmaxp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnmp))] @@ -8581,7 +8581,7 @@ pub fn svminnmp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svflo } #[doc = "Minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnmp))] @@ -8590,7 +8590,7 @@ pub fn svminnmp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svflo } #[doc = "Minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnmp))] @@ -8606,7 +8606,7 @@ pub fn svminnmp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svflo } #[doc = "Minimum number pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminnmp[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminnmp))] @@ -8615,7 +8615,7 @@ pub fn svminnmp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svflo } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminp))] @@ -8628,7 +8628,7 @@ pub fn svminp_f32_m(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Minimum pairwise"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminp))] @@ -8637,7 +8637,7 @@ pub fn svminp_f32_x(pg: svbool_t, op1: svfloat32_t, op2: svfloat32_t) -> svfloat } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminp))] @@ -8650,7 +8650,7 @@ pub fn svminp_f64_m(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_f64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fminp))] @@ -8659,7 +8659,7 @@ pub fn svminp_f64_x(pg: svbool_t, op1: svfloat64_t, op2: svfloat64_t) -> svfloat } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminp))] @@ -8672,7 +8672,7 @@ pub fn svminp_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminp))] @@ -8681,7 +8681,7 @@ pub fn svminp_s8_x(pg: 
svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminp))] @@ -8694,7 +8694,7 @@ pub fn svminp_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminp))] @@ -8703,7 +8703,7 @@ pub fn svminp_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminp))] @@ -8716,7 +8716,7 @@ pub fn svminp_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminp))] @@ -8725,7 +8725,7 @@ pub fn svminp_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(sminp))] @@ -8738,7 +8738,7 @@ pub fn svminp_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sminp))] @@ -8747,7 +8747,7 @@ pub fn svminp_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminp))] @@ -8760,7 +8760,7 @@ pub fn svminp_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminp))] @@ -8769,7 +8769,7 @@ pub fn svminp_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminp))] @@ -8782,7 +8782,7 @@ pub fn svminp_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u16]_x)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminp))] @@ -8791,7 +8791,7 @@ pub fn svminp_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminp))] @@ -8804,7 +8804,7 @@ pub fn svminp_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminp))] @@ -8813,7 +8813,7 @@ pub fn svminp_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminp))] @@ -8826,7 +8826,7 @@ pub fn svminp_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Minimum pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svminp[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uminp))] @@ -8835,7 +8835,7 @@ pub fn svminp_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Multiply-add, addend first"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] @@ -8861,7 +8861,7 @@ pub fn svmla_lane_s16( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] @@ -8887,7 +8887,7 @@ pub fn svmla_lane_s32( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] @@ -8913,7 +8913,7 @@ pub fn svmla_lane_s64( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] @@ -8929,7 +8929,7 @@ pub fn svmla_lane_u16( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] @@ -8945,7 +8945,7 @@ pub fn svmla_lane_u32( } #[doc = "Multiply-add, addend first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmla_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mla, IMM_INDEX = 0))] @@ -8961,7 +8961,7 @@ pub fn svmla_lane_u64( } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))] @@ -8987,7 +8987,7 @@ pub fn svmlalb_lane_s32( } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalb, IMM_INDEX = 0))] @@ -9013,7 +9013,7 @@ pub fn svmlalb_lane_s64( } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))] @@ -9042,7 +9042,7 @@ pub fn svmlalb_lane_u32( } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalb, IMM_INDEX = 0))] @@ -9071,7 +9071,7 @@ pub fn svmlalb_lane_u64( } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalb))] @@ -9084,7 +9084,7 @@ pub fn svmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalb))] @@ -9093,7 +9093,7 @@ pub fn svmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalb))] @@ -9106,7 +9106,7 @@ pub fn svmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalb))] @@ -9115,7 +9115,7 @@ pub fn svmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalb))] @@ 
-9128,7 +9128,7 @@ pub fn svmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalb))] @@ -9137,7 +9137,7 @@ pub fn svmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalb))] @@ -9150,7 +9150,7 @@ pub fn svmlalb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_ } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalb))] @@ -9159,7 +9159,7 @@ pub fn svmlalb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalb))] @@ -9172,7 +9172,7 @@ pub fn svmlalb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint3 } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u32])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalb))] @@ -9181,7 +9181,7 @@ pub fn svmlalb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalb))] @@ -9194,7 +9194,7 @@ pub fn svmlalb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint6 } #[doc = "Multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalb))] @@ -9203,7 +9203,7 @@ pub fn svmlalb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))] @@ -9229,7 +9229,7 @@ pub fn svmlalt_lane_s32( } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalt, IMM_INDEX = 0))] @@ -9255,7 +9255,7 @@ pub fn svmlalt_lane_s64( } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))] @@ -9284,7 +9284,7 @@ pub fn svmlalt_lane_u32( } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalt, IMM_INDEX = 0))] @@ -9313,7 +9313,7 @@ pub fn svmlalt_lane_u64( } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalt))] @@ -9326,7 +9326,7 @@ pub fn svmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalt))] @@ -9335,7 +9335,7 @@ pub fn svmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalt))] @@ -9348,7 +9348,7 @@ pub fn svmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> 
svint32_t } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalt))] @@ -9357,7 +9357,7 @@ pub fn svmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalt))] @@ -9370,7 +9370,7 @@ pub fn svmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlalt))] @@ -9379,7 +9379,7 @@ pub fn svmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalt))] @@ -9392,7 +9392,7 @@ pub fn svmlalt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_ } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(umlalt))] @@ -9401,7 +9401,7 @@ pub fn svmlalt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalt))] @@ -9414,7 +9414,7 @@ pub fn svmlalt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint3 } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalt))] @@ -9423,7 +9423,7 @@ pub fn svmlalt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalt))] @@ -9436,7 +9436,7 @@ pub fn svmlalt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint6 } #[doc = "Multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlalt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlalt))] @@ -9445,7 +9445,7 @@ pub fn svmlalt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] @@ -9471,7 +9471,7 @@ pub fn svmls_lane_s16( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] @@ -9497,7 +9497,7 @@ pub fn svmls_lane_s32( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] @@ -9523,7 +9523,7 @@ pub fn svmls_lane_s64( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] @@ -9539,7 +9539,7 @@ pub fn svmls_lane_u16( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] @@ -9555,7 +9555,7 @@ pub fn svmls_lane_u32( } #[doc = "Multiply-subtract, minuend first"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmls_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mls, IMM_INDEX = 0))] @@ -9571,7 +9571,7 @@ pub fn svmls_lane_u64( } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] @@ -9597,7 +9597,7 @@ pub fn svmlslb_lane_s32( } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslb, IMM_INDEX = 0))] @@ -9623,7 +9623,7 @@ pub fn svmlslb_lane_s64( } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] @@ -9652,7 +9652,7 @@ pub fn svmlslb_lane_u32( } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslb, IMM_INDEX = 0))] @@ -9681,7 +9681,7 @@ pub fn svmlslb_lane_u64( } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslb))] @@ -9694,7 +9694,7 @@ pub fn svmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslb))] @@ -9703,7 +9703,7 @@ pub fn svmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslb))] @@ -9716,7 +9716,7 @@ pub fn svmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslb))] @@ -9725,7 +9725,7 @@ pub fn svmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(smlslb))] @@ -9738,7 +9738,7 @@ pub fn svmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslb))] @@ -9747,7 +9747,7 @@ pub fn svmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslb))] @@ -9760,7 +9760,7 @@ pub fn svmlslb_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_ } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslb))] @@ -9769,7 +9769,7 @@ pub fn svmlslb_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslb))] @@ -9782,7 +9782,7 @@ pub fn svmlslb_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint3 } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslb))] @@ -9791,7 +9791,7 @@ pub fn svmlslb_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslb))] @@ -9804,7 +9804,7 @@ pub fn svmlslb_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint6 } #[doc = "Multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslb))] @@ -9813,7 +9813,7 @@ pub fn svmlslb_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] @@ -9839,7 +9839,7 @@ pub fn svmlslt_lane_s32( } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslt, IMM_INDEX = 0))] 
@@ -9865,7 +9865,7 @@ pub fn svmlslt_lane_s64( } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] @@ -9894,7 +9894,7 @@ pub fn svmlslt_lane_u32( } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslt, IMM_INDEX = 0))] @@ -9923,7 +9923,7 @@ pub fn svmlslt_lane_u64( } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslt))] @@ -9936,7 +9936,7 @@ pub fn svmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t { } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslt))] @@ -9945,7 +9945,7 @@ pub fn svmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(smlslt))] @@ -9958,7 +9958,7 @@ pub fn svmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_t } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslt))] @@ -9967,7 +9967,7 @@ pub fn svmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslt))] @@ -9980,7 +9980,7 @@ pub fn svmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_t } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smlslt))] @@ -9989,7 +9989,7 @@ pub fn svmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslt))] @@ -10002,7 +10002,7 @@ pub fn svmlslt_u16(op1: svuint16_t, op2: svuint8_t, op3: svuint8_t) -> svuint16_ } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslt))] @@ -10011,7 +10011,7 @@ pub fn svmlslt_n_u16(op1: svuint16_t, op2: svuint8_t, op3: u8) -> svuint16_t { } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslt))] @@ -10024,7 +10024,7 @@ pub fn svmlslt_u32(op1: svuint32_t, op2: svuint16_t, op3: svuint16_t) -> svuint3 } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslt))] @@ -10033,7 +10033,7 @@ pub fn svmlslt_n_u32(op1: svuint32_t, op2: svuint16_t, op3: u16) -> svuint32_t { } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umlslt))] @@ -10046,7 +10046,7 @@ pub fn svmlslt_u64(op1: svuint64_t, op2: svuint32_t, op3: svuint32_t) -> svuint6 } #[doc = "Multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmlslt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(umlslt))] @@ -10055,7 +10055,7 @@ pub fn svmlslt_n_u64(op1: svuint64_t, op2: svuint32_t, op3: u32) -> svuint64_t { } #[doc = "Move long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllb))] @@ -10064,7 +10064,7 @@ pub fn svmovlb_s16(op: svint8_t) -> svint16_t { } #[doc = "Move long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllb))] @@ -10073,7 +10073,7 @@ pub fn svmovlb_s32(op: svint16_t) -> svint32_t { } #[doc = "Move long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllb))] @@ -10082,7 +10082,7 @@ pub fn svmovlb_s64(op: svint32_t) -> svint64_t { } #[doc = "Move long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllb))] @@ -10091,7 +10091,7 @@ pub fn svmovlb_u16(op: svuint8_t) -> svuint16_t { } #[doc = "Move long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(ushllb))] @@ -10100,7 +10100,7 @@ pub fn svmovlb_u32(op: svuint16_t) -> svuint32_t { } #[doc = "Move long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllb))] @@ -10109,7 +10109,7 @@ pub fn svmovlb_u64(op: svuint32_t) -> svuint64_t { } #[doc = "Move long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllt))] @@ -10118,7 +10118,7 @@ pub fn svmovlt_s16(op: svint8_t) -> svint16_t { } #[doc = "Move long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllt))] @@ -10127,7 +10127,7 @@ pub fn svmovlt_s32(op: svint16_t) -> svint32_t { } #[doc = "Move long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllt))] @@ -10136,7 +10136,7 @@ pub fn svmovlt_s64(op: svint32_t) -> svint64_t { } #[doc = "Move long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllt))] @@ -10145,7 +10145,7 
@@ pub fn svmovlt_u16(op: svuint8_t) -> svuint16_t { } #[doc = "Move long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllt))] @@ -10154,7 +10154,7 @@ pub fn svmovlt_u32(op: svuint16_t) -> svuint32_t { } #[doc = "Move long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmovlt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllt))] @@ -10163,7 +10163,7 @@ pub fn svmovlt_u64(op: svuint32_t) -> svuint64_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] @@ -10180,7 +10180,7 @@ pub fn svmul_lane_f32(op1: svfloat32_t, op2: svfloat32_t) } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(fmul, IMM_INDEX = 0))] @@ -10197,7 +10197,7 @@ pub fn svmul_lane_f64(op1: svfloat64_t, op2: svfloat64_t) } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] @@ -10214,7 +10214,7 
@@ pub fn svmul_lane_s16(op1: svint16_t, op2: svint16_t) -> s } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] @@ -10231,7 +10231,7 @@ pub fn svmul_lane_s32(op1: svint32_t, op2: svint32_t) -> s } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] @@ -10248,7 +10248,7 @@ pub fn svmul_lane_s64(op1: svint64_t, op2: svint64_t) -> s } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] @@ -10258,7 +10258,7 @@ pub fn svmul_lane_u16(op1: svuint16_t, op2: svuint16_t) -> } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(mul, IMM_INDEX = 0))] @@ -10268,7 +10268,7 @@ pub fn svmul_lane_u32(op1: svuint32_t, op2: svuint32_t) -> } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmul_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(mul, IMM_INDEX = 0))] @@ -10278,7 +10278,7 @@ pub fn svmul_lane_u64(op1: svuint64_t, op2: svuint64_t) -> } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr( @@ -10298,7 +10298,7 @@ pub fn svmullb_lane_s32(op1: svint16_t, op2: svint16_t) -> } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr( @@ -10318,7 +10318,7 @@ pub fn svmullb_lane_s64(op1: svint32_t, op2: svint32_t) -> } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr( @@ -10338,7 +10338,7 @@ pub fn svmullb_lane_u32(op1: svuint16_t, op2: svuint16_t) } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr( @@ -10358,7 +10358,7 @@ pub fn svmullb_lane_u64(op1: svuint32_t, op2: svuint32_t) } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smullb))] @@ -10371,7 +10371,7 @@ 
pub fn svmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smullb))] @@ -10380,7 +10380,7 @@ pub fn svmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smullb))] @@ -10393,7 +10393,7 @@ pub fn svmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smullb))] @@ -10402,7 +10402,7 @@ pub fn svmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(smullb))] @@ -10415,7 +10415,7 @@ pub fn svmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(smullb))] @@ -10424,7 +10424,7 @@ pub fn svmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umullb))] @@ -10437,7 +10437,7 @@ pub fn svmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umullb))] @@ -10446,7 +10446,7 @@ pub fn svmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umullb))] @@ -10459,7 +10459,7 @@ pub fn svmullb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umullb))] @@ -10468,7 +10468,7 @@ pub fn svmullb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umullb))] @@ -10481,7 +10481,7 @@ pub fn svmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(umullb))] @@ -10490,7 +10490,7 @@ pub fn svmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr( @@ -10510,7 +10510,7 @@ pub fn svmullt_lane_s32(op1: svint16_t, op2: svint16_t) -> } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr( @@ -10530,7 +10530,7 @@ pub fn svmullt_lane_s64(op1: svint32_t, op2: svint32_t) -> } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr( @@ -10550,7 +10550,7 @@ pub fn svmullt_lane_u32(op1: svuint16_t, op2: svuint16_t) } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt_lane[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr( @@ -10570,7 +10570,7 @@ pub fn svmullt_lane_u64(op1: svuint32_t, op2: svuint32_t) } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] @@ -10583,7 +10583,7 @@ pub fn svmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] @@ -10592,7 +10592,7 @@ pub fn svmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] @@ -10605,7 +10605,7 @@ pub fn svmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] @@ -10614,7 +10614,7 @@ pub fn svmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] @@ -10627,7 +10627,7 @@ pub fn svmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(smullt))] @@ -10636,7 +10636,7 @@ pub fn svmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] @@ -10649,7 +10649,7 @@ pub fn svmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] @@ -10658,7 +10658,7 @@ pub fn svmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] @@ -10671,7 +10671,7 @@ pub fn svmullt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] @@ -10680,7 +10680,7 @@ pub fn svmullt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] @@ -10693,7 +10693,7 @@ pub fn svmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svmullt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(all(test, not(target_env = "msvc")), assert_instr(umullt))] @@ -10702,7 +10702,7 @@ pub fn svmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10715,7 +10715,7 @@ pub fn svnbsl_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { } #[doc = "Bitwise select"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10724,7 +10724,7 @@ pub fn svnbsl_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10737,7 +10737,7 @@ pub fn svnbsl_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10746,7 +10746,7 @@ pub fn svnbsl_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10759,7 +10759,7 @@ pub fn svnbsl_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10768,7 +10768,7 @@ pub fn svnbsl_n_s32(op1: svint32_t, op2: 
svint32_t, op3: i32) -> svint32_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10781,7 +10781,7 @@ pub fn svnbsl_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10790,7 +10790,7 @@ pub fn svnbsl_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10799,7 +10799,7 @@ pub fn svnbsl_u8(op1: svuint8_t, op2: svuint8_t, op3: svuint8_t) -> svuint8_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10808,7 +10808,7 @@ pub fn svnbsl_n_u8(op1: svuint8_t, op2: svuint8_t, op3: u8) -> svuint8_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(nbsl))] @@ -10817,7 +10817,7 @@ pub fn svnbsl_u16(op1: svuint16_t, op2: svuint16_t, op3: svuint16_t) -> svuint16 } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10826,7 +10826,7 @@ pub fn svnbsl_n_u16(op1: svuint16_t, op2: svuint16_t, op3: u16) -> svuint16_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10835,7 +10835,7 @@ pub fn svnbsl_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint32 } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10844,7 +10844,7 @@ pub fn svnbsl_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10853,7 +10853,7 @@ pub fn svnbsl_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint64 } #[doc = "Bitwise select"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnbsl[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable 
= "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nbsl))] @@ -10862,7 +10862,7 @@ pub fn svnbsl_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Detect no matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nmatch))] @@ -10875,7 +10875,7 @@ pub fn svnmatch_s8(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svbool_t { } #[doc = "Detect no matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nmatch))] @@ -10888,7 +10888,7 @@ pub fn svnmatch_s16(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svbool_t { } #[doc = "Detect no matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nmatch))] @@ -10897,7 +10897,7 @@ pub fn svnmatch_u8(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svbool_t { } #[doc = "Detect no matching elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svnmatch[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(nmatch))] @@ -10906,7 +10906,7 @@ pub fn svnmatch_u16(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svbool_t } #[doc = "Polynomial multiply"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmul))] @@ -10919,7 +10919,7 @@ pub fn svpmul_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Polynomial multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmul[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmul))] @@ -10928,7 +10928,7 @@ pub fn svpmul_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ -10944,7 +10944,7 @@ pub fn svpmullb_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ -10953,7 +10953,7 @@ pub fn svpmullb_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ 
-10969,7 +10969,7 @@ pub fn svpmullb_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ -10978,7 +10978,7 @@ pub fn svpmullb_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ -10994,7 +10994,7 @@ pub fn svpmullb_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb_pair[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ -11003,7 +11003,7 @@ pub fn svpmullb_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ -11012,7 +11012,7 @@ pub fn svpmullb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ -11021,7 +11021,7 @@ pub fn svpmullb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ -11030,7 +11030,7 @@ pub fn svpmullb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Polynomial multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullb))] @@ -11039,7 +11039,7 @@ pub fn svpmullb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullt))] @@ -11055,7 +11055,7 @@ pub fn svpmullt_pair_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(pmullt))] @@ -11064,7 +11064,7 @@ pub fn svpmullt_pair_n_u8(op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullt))] @@ -11080,7 +11080,7 @@ pub fn svpmullt_pair_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullt))] @@ -11089,7 +11089,7 @@ pub fn svpmullt_pair_n_u32(op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullt))] @@ -11105,7 +11105,7 @@ pub fn svpmullt_pair_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt_pair[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullt))] @@ -11114,7 +11114,7 @@ pub fn svpmullt_pair_n_u64(op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullt))] @@ -11123,7 +11123,7 @@ pub fn svpmullt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullt))] @@ -11132,7 +11132,7 @@ pub fn svpmullt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullt))] @@ -11141,7 +11141,7 @@ pub fn svpmullt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Polynomial multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svpmullt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-aes")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(pmullt))] @@ -11150,7 +11150,7 @@ pub fn svpmullt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] 
@@ -11163,7 +11163,7 @@ pub fn svqabs_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11172,7 +11172,7 @@ pub fn svqabs_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11181,7 +11181,7 @@ pub fn svqabs_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11194,7 +11194,7 @@ pub fn svqabs_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16 } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11203,7 +11203,7 @@ pub fn svqabs_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11212,7 +11212,7 @@ pub fn svqabs_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11225,7 +11225,7 @@ pub fn svqabs_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32 } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11234,7 +11234,7 @@ pub fn svqabs_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11243,7 +11243,7 @@ pub fn svqabs_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11256,7 +11256,7 @@ pub fn svqabs_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Saturating absolute value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11265,7 +11265,7 @@ pub fn svqabs_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Saturating absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqabs[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqabs))] @@ -11274,7 +11274,7 @@ pub fn svqabs_s64_z(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11287,7 +11287,7 @@ pub fn svqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11296,7 +11296,7 @@ pub fn svqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11305,7 +11305,7 @@ pub fn svqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: 
svint8_t) -> svint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11314,7 +11314,7 @@ pub fn svqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11323,7 +11323,7 @@ pub fn svqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11332,7 +11332,7 @@ pub fn svqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11345,7 +11345,7 @@ pub fn svqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqadd))] @@ -11354,7 +11354,7 @@ pub fn svqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11363,7 +11363,7 @@ pub fn svqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11372,7 +11372,7 @@ pub fn svqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11381,7 +11381,7 @@ pub fn svqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11390,7 +11390,7 @@ pub fn svqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_m)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11403,7 +11403,7 @@ pub fn svqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11412,7 +11412,7 @@ pub fn svqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11421,7 +11421,7 @@ pub fn svqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11430,7 +11430,7 @@ pub fn svqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11439,7 +11439,7 @@ pub fn svqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11448,7 +11448,7 @@ pub fn svqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11461,7 +11461,7 @@ pub fn svqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11470,7 +11470,7 @@ pub fn svqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11479,7 +11479,7 @@ pub fn svqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11488,7 +11488,7 @@ pub fn 
svqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11497,7 +11497,7 @@ pub fn svqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqadd))] @@ -11506,7 +11506,7 @@ pub fn svqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11519,7 +11519,7 @@ pub fn svqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11528,7 +11528,7 @@ pub fn svqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11537,7 +11537,7 @@ pub fn svqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11546,7 +11546,7 @@ pub fn svqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11555,7 +11555,7 @@ pub fn svqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11564,7 +11564,7 @@ pub fn svqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11577,7 +11577,7 @@ pub fn svqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Saturating add"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11586,7 +11586,7 @@ pub fn svqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11595,7 +11595,7 @@ pub fn svqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11604,7 +11604,7 @@ pub fn svqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11613,7 +11613,7 @@ pub fn svqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11622,7 +11622,7 @@ pub fn 
svqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11635,7 +11635,7 @@ pub fn svqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11644,7 +11644,7 @@ pub fn svqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11653,7 +11653,7 @@ pub fn svqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11662,7 +11662,7 @@ pub fn svqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11671,7 +11671,7 @@ pub fn svqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11680,7 +11680,7 @@ pub fn svqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11693,7 +11693,7 @@ pub fn svqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11702,7 +11702,7 @@ pub fn svqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11711,7 +11711,7 @@ pub fn svqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Saturating add"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11720,7 +11720,7 @@ pub fn svqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11729,7 +11729,7 @@ pub fn svqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqadd[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqadd))] @@ -11738,7 +11738,7 @@ pub fn svqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Saturating complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] @@ -11755,7 +11755,7 @@ pub fn svqcadd_s8(op1: svint8_t, op2: svint8_t) -> svin } #[doc = "Saturating complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION 
= 90))] @@ -11772,7 +11772,7 @@ pub fn svqcadd_s16(op1: svint16_t, op2: svint16_t) -> s } #[doc = "Saturating complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] @@ -11789,7 +11789,7 @@ pub fn svqcadd_s32(op1: svint32_t, op2: svint32_t) -> s } #[doc = "Saturating complex add with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqcadd[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqcadd, IMM_ROTATION = 90))] @@ -11806,7 +11806,7 @@ pub fn svqcadd_s64(op1: svint64_t, op2: svint64_t) -> s } #[doc = "Saturating doubling multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] @@ -11832,7 +11832,7 @@ pub fn svqdmlalb_lane_s32( } #[doc = "Saturating doubling multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalb, IMM_INDEX = 0))] @@ -11858,7 +11858,7 @@ pub fn svqdmlalb_lane_s64( } #[doc = "Saturating doubling multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s16])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalb))] @@ -11874,7 +11874,7 @@ pub fn svqdmlalb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t } #[doc = "Saturating doubling multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalb))] @@ -11883,7 +11883,7 @@ pub fn svqdmlalb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Saturating doubling multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalb))] @@ -11899,7 +11899,7 @@ pub fn svqdmlalb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_ } #[doc = "Saturating doubling multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalb))] @@ -11908,7 +11908,7 @@ pub fn svqdmlalb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Saturating doubling multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqdmlalb))] @@ -11924,7 +11924,7 @@ pub fn svqdmlalb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_ } #[doc = "Saturating doubling multiply-add long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalb))] @@ -11933,7 +11933,7 @@ pub fn svqdmlalb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Saturating doubling multiply-add long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalbt))] @@ -11949,7 +11949,7 @@ pub fn svqdmlalbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t } #[doc = "Saturating doubling multiply-add long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalbt))] @@ -11958,7 +11958,7 @@ pub fn svqdmlalbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Saturating doubling multiply-add long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalbt))] @@ -11974,7 +11974,7 @@ pub fn svqdmlalbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32 } #[doc = 
"Saturating doubling multiply-add long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalbt))] @@ -11983,7 +11983,7 @@ pub fn svqdmlalbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Saturating doubling multiply-add long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalbt))] @@ -11999,7 +11999,7 @@ pub fn svqdmlalbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64 } #[doc = "Saturating doubling multiply-add long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalbt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalbt))] @@ -12008,7 +12008,7 @@ pub fn svqdmlalbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Saturating doubling multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] @@ -12034,7 +12034,7 @@ pub fn svqdmlalt_lane_s32( } #[doc = "Saturating doubling multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt_lane[_s64])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalt, IMM_INDEX = 0))] @@ -12060,7 +12060,7 @@ pub fn svqdmlalt_lane_s64( } #[doc = "Saturating doubling multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalt))] @@ -12076,7 +12076,7 @@ pub fn svqdmlalt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t } #[doc = "Saturating doubling multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalt))] @@ -12085,7 +12085,7 @@ pub fn svqdmlalt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Saturating doubling multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalt))] @@ -12101,7 +12101,7 @@ pub fn svqdmlalt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_ } #[doc = "Saturating doubling multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalt))] @@ -12110,7 +12110,7 @@ pub fn 
svqdmlalt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Saturating doubling multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalt))] @@ -12126,7 +12126,7 @@ pub fn svqdmlalt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_ } #[doc = "Saturating doubling multiply-add long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlalt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlalt))] @@ -12135,7 +12135,7 @@ pub fn svqdmlalt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Saturating doubling multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] @@ -12161,7 +12161,7 @@ pub fn svqdmlslb_lane_s32( } #[doc = "Saturating doubling multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslb, IMM_INDEX = 0))] @@ -12187,7 +12187,7 @@ pub fn svqdmlslb_lane_s64( } #[doc = "Saturating doubling multiply-subtract long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslb))] @@ -12203,7 +12203,7 @@ pub fn svqdmlslb_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t } #[doc = "Saturating doubling multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslb))] @@ -12212,7 +12212,7 @@ pub fn svqdmlslb_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Saturating doubling multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslb))] @@ -12228,7 +12228,7 @@ pub fn svqdmlslb_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_ } #[doc = "Saturating doubling multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslb))] @@ -12237,7 +12237,7 @@ pub fn svqdmlslb_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Saturating doubling multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_s64])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslb))] @@ -12253,7 +12253,7 @@ pub fn svqdmlslb_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_ } #[doc = "Saturating doubling multiply-subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslb))] @@ -12262,7 +12262,7 @@ pub fn svqdmlslb_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Saturating doubling multiply-subtract long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslbt))] @@ -12278,7 +12278,7 @@ pub fn svqdmlslbt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t } #[doc = "Saturating doubling multiply-subtract long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslbt))] @@ -12287,7 +12287,7 @@ pub fn svqdmlslbt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Saturating doubling multiply-subtract long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqdmlslbt))] @@ -12303,7 +12303,7 @@ pub fn svqdmlslbt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32 } #[doc = "Saturating doubling multiply-subtract long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslbt))] @@ -12312,7 +12312,7 @@ pub fn svqdmlslbt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Saturating doubling multiply-subtract long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslbt))] @@ -12328,7 +12328,7 @@ pub fn svqdmlslbt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64 } #[doc = "Saturating doubling multiply-subtract long (bottom × top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslbt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslbt))] @@ -12337,7 +12337,7 @@ pub fn svqdmlslbt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Saturating doubling multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] @@ -12363,7 +12363,7 @@ pub fn svqdmlslt_lane_s32( } #[doc = "Saturating 
doubling multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslt, IMM_INDEX = 0))] @@ -12389,7 +12389,7 @@ pub fn svqdmlslt_lane_s64( } #[doc = "Saturating doubling multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslt))] @@ -12405,7 +12405,7 @@ pub fn svqdmlslt_s16(op1: svint16_t, op2: svint8_t, op3: svint8_t) -> svint16_t } #[doc = "Saturating doubling multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslt))] @@ -12414,7 +12414,7 @@ pub fn svqdmlslt_n_s16(op1: svint16_t, op2: svint8_t, op3: i8) -> svint16_t { } #[doc = "Saturating doubling multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslt))] @@ -12430,7 +12430,7 @@ pub fn svqdmlslt_s32(op1: svint32_t, op2: svint16_t, op3: svint16_t) -> svint32_ } #[doc = "Saturating doubling multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s32])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslt))] @@ -12439,7 +12439,7 @@ pub fn svqdmlslt_n_s32(op1: svint32_t, op2: svint16_t, op3: i16) -> svint32_t { } #[doc = "Saturating doubling multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslt))] @@ -12455,7 +12455,7 @@ pub fn svqdmlslt_s64(op1: svint64_t, op2: svint32_t, op3: svint32_t) -> svint64_ } #[doc = "Saturating doubling multiply-subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmlslt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmlslt))] @@ -12464,7 +12464,7 @@ pub fn svqdmlslt_n_s64(op1: svint64_t, op2: svint32_t, op3: i32) -> svint64_t { } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] @@ -12481,7 +12481,7 @@ pub fn svqdmulh_lane_s16(op1: svint16_t, op2: svint16_t) - } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] @@ -12498,7 +12498,7 @@ pub fn 
svqdmulh_lane_s32(op1: svint32_t, op2: svint32_t) - } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh, IMM_INDEX = 0))] @@ -12515,7 +12515,7 @@ pub fn svqdmulh_lane_s64(op1: svint64_t, op2: svint64_t) - } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh))] @@ -12531,7 +12531,7 @@ pub fn svqdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh))] @@ -12540,7 +12540,7 @@ pub fn svqdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh))] @@ -12556,7 +12556,7 @@ pub fn svqdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh))] @@ -12565,7 +12565,7 @@ pub fn svqdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh))] @@ -12581,7 +12581,7 @@ pub fn svqdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh))] @@ -12590,7 +12590,7 @@ pub fn svqdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh))] @@ -12606,7 +12606,7 @@ pub fn svqdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmulh[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmulh))] @@ -12615,7 +12615,7 @@ pub fn svqdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating doubling multiply long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] @@ -12632,7 +12632,7 @@ pub fn svqdmullb_lane_s32(op1: svint16_t, op2: svint16_t) } #[doc = "Saturating doubling multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullb, IMM_INDEX = 0))] @@ -12649,7 +12649,7 @@ pub fn svqdmullb_lane_s64(op1: svint32_t, op2: svint32_t) } #[doc = "Saturating doubling multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullb))] @@ -12665,7 +12665,7 @@ pub fn svqdmullb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Saturating doubling multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullb))] @@ -12674,7 +12674,7 @@ pub fn svqdmullb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Saturating doubling multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(sqdmullb))] @@ -12690,7 +12690,7 @@ pub fn svqdmullb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Saturating doubling multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullb))] @@ -12699,7 +12699,7 @@ pub fn svqdmullb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Saturating doubling multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullb))] @@ -12715,7 +12715,7 @@ pub fn svqdmullb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Saturating doubling multiply long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullb))] @@ -12724,7 +12724,7 @@ pub fn svqdmullb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Saturating doubling multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] @@ -12741,7 +12741,7 @@ pub fn svqdmullt_lane_s32(op1: svint16_t, op2: svint16_t) } #[doc = "Saturating doubling multiply long (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullt, IMM_INDEX = 0))] @@ -12758,7 +12758,7 @@ pub fn svqdmullt_lane_s64(op1: svint32_t, op2: svint32_t) } #[doc = "Saturating doubling multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullt))] @@ -12774,7 +12774,7 @@ pub fn svqdmullt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Saturating doubling multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullt))] @@ -12783,7 +12783,7 @@ pub fn svqdmullt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Saturating doubling multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullt))] @@ -12799,7 +12799,7 @@ pub fn svqdmullt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Saturating doubling multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(sqdmullt))] @@ -12808,7 +12808,7 @@ pub fn svqdmullt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Saturating doubling multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullt))] @@ -12824,7 +12824,7 @@ pub fn svqdmullt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Saturating doubling multiply long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqdmullt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqdmullt))] @@ -12833,7 +12833,7 @@ pub fn svqdmullt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12846,7 +12846,7 @@ pub fn svqneg_s8_m(inactive: svint8_t, pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12855,7 +12855,7 @@ pub fn svqneg_s8_x(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s8]_z)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12864,7 +12864,7 @@ pub fn svqneg_s8_z(pg: svbool_t, op: svint8_t) -> svint8_t { } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12877,7 +12877,7 @@ pub fn svqneg_s16_m(inactive: svint16_t, pg: svbool_t, op: svint16_t) -> svint16 } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12886,7 +12886,7 @@ pub fn svqneg_s16_x(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12895,7 +12895,7 @@ pub fn svqneg_s16_z(pg: svbool_t, op: svint16_t) -> svint16_t { } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12908,7 +12908,7 @@ pub fn svqneg_s32_m(inactive: svint32_t, pg: svbool_t, op: svint32_t) -> svint32 } #[doc = "Saturating negate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12917,7 +12917,7 @@ pub fn svqneg_s32_x(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12926,7 +12926,7 @@ pub fn svqneg_s32_z(pg: svbool_t, op: svint32_t) -> svint32_t { } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12939,7 +12939,7 @@ pub fn svqneg_s64_m(inactive: svint64_t, pg: svbool_t, op: svint64_t) -> svint64 } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12948,7 +12948,7 @@ pub fn svqneg_s64_x(pg: svbool_t, op: svint64_t) -> svint64_t { } #[doc = "Saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqneg[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqneg))] @@ -12957,7 +12957,7 @@ pub fn svqneg_s64_z(pg: svbool_t, op: svint64_t) -> 
svint64_t { } #[doc = "Saturating rounding doubling complex multiply-add high with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))] @@ -12987,7 +12987,7 @@ pub fn svqrdcmlah_lane_s16( } #[doc = "Saturating rounding doubling complex multiply-add high with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdcmlah, IMM_INDEX = 0, IMM_ROTATION = 90))] @@ -13017,7 +13017,7 @@ pub fn svqrdcmlah_lane_s32( } #[doc = "Saturating rounding doubling complex multiply-add high with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] @@ -13045,7 +13045,7 @@ pub fn svqrdcmlah_s8( } #[doc = "Saturating rounding doubling complex multiply-add high with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] @@ -13073,7 +13073,7 @@ pub fn svqrdcmlah_s16( } #[doc = "Saturating rounding doubling complex multiply-add high with rotate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] @@ -13101,7 +13101,7 @@ pub fn svqrdcmlah_s32( } #[doc = "Saturating rounding doubling complex multiply-add high with rotate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdcmlah[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdcmlah, IMM_ROTATION = 90))] @@ -13129,7 +13129,7 @@ pub fn svqrdcmlah_s64( } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] @@ -13155,7 +13155,7 @@ pub fn svqrdmlah_lane_s16( } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 0))] @@ -13181,7 +13181,7 @@ pub fn svqrdmlah_lane_s32( } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah, IMM_INDEX = 
0))] @@ -13207,7 +13207,7 @@ pub fn svqrdmlah_lane_s64( } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah))] @@ -13223,7 +13223,7 @@ pub fn svqrdmlah_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah))] @@ -13232,7 +13232,7 @@ pub fn svqrdmlah_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah))] @@ -13248,7 +13248,7 @@ pub fn svqrdmlah_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_ } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah))] @@ -13257,7 +13257,7 @@ pub fn svqrdmlah_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah))] @@ -13273,7 +13273,7 @@ pub fn svqrdmlah_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_ } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah))] @@ -13282,7 +13282,7 @@ pub fn svqrdmlah_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah))] @@ -13298,7 +13298,7 @@ pub fn svqrdmlah_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_ } #[doc = "Saturating rounding doubling multiply-add high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlah[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlah))] @@ -13307,7 +13307,7 @@ pub fn svqrdmlah_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] @@ -13333,7 +13333,7 @@ pub fn svqrdmlsh_lane_s16( } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] @@ -13359,7 +13359,7 @@ pub fn svqrdmlsh_lane_s32( } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh, IMM_INDEX = 0))] @@ -13385,7 +13385,7 @@ pub fn svqrdmlsh_lane_s64( } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh))] @@ -13401,7 +13401,7 @@ pub fn svqrdmlsh_s8(op1: svint8_t, op2: svint8_t, op3: svint8_t) -> svint8_t { } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh))] @@ -13410,7 +13410,7 @@ pub fn svqrdmlsh_n_s8(op1: svint8_t, op2: svint8_t, op3: i8) -> svint8_t { } #[doc = "Saturating rounding doubling 
multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh))] @@ -13426,7 +13426,7 @@ pub fn svqrdmlsh_s16(op1: svint16_t, op2: svint16_t, op3: svint16_t) -> svint16_ } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh))] @@ -13435,7 +13435,7 @@ pub fn svqrdmlsh_n_s16(op1: svint16_t, op2: svint16_t, op3: i16) -> svint16_t { } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh))] @@ -13451,7 +13451,7 @@ pub fn svqrdmlsh_s32(op1: svint32_t, op2: svint32_t, op3: svint32_t) -> svint32_ } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh))] @@ -13460,7 +13460,7 @@ pub fn svqrdmlsh_n_s32(op1: svint32_t, op2: svint32_t, op3: i32) -> svint32_t { } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_s64])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh))] @@ -13476,7 +13476,7 @@ pub fn svqrdmlsh_s64(op1: svint64_t, op2: svint64_t, op3: svint64_t) -> svint64_ } #[doc = "Saturating rounding doubling multiply-subtract high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmlsh[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmlsh))] @@ -13485,7 +13485,7 @@ pub fn svqrdmlsh_n_s64(op1: svint64_t, op2: svint64_t, op3: i64) -> svint64_t { } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))] @@ -13502,7 +13502,7 @@ pub fn svqrdmulh_lane_s16(op1: svint16_t, op2: svint16_t) } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh, IMM_INDEX = 0))] @@ -13519,7 +13519,7 @@ pub fn svqrdmulh_lane_s32(op1: svint32_t, op2: svint32_t) } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh_lane[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh, 
IMM_INDEX = 0))] @@ -13536,7 +13536,7 @@ pub fn svqrdmulh_lane_s64(op1: svint64_t, op2: svint64_t) } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh))] @@ -13552,7 +13552,7 @@ pub fn svqrdmulh_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh))] @@ -13561,7 +13561,7 @@ pub fn svqrdmulh_n_s8(op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh))] @@ -13577,7 +13577,7 @@ pub fn svqrdmulh_s16(op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh))] @@ -13586,7 +13586,7 @@ pub fn svqrdmulh_n_s16(op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh))] @@ -13602,7 +13602,7 @@ pub fn svqrdmulh_s32(op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh))] @@ -13611,7 +13611,7 @@ pub fn svqrdmulh_n_s32(op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh))] @@ -13627,7 +13627,7 @@ pub fn svqrdmulh_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating rounding doubling multiply high"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrdmulh[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrdmulh))] @@ -13636,7 +13636,7 @@ pub fn svqrdmulh_n_s64(op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqrshl))] @@ -13649,7 +13649,7 @@ pub fn svqrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13658,7 +13658,7 @@ pub fn svqrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13667,7 +13667,7 @@ pub fn svqrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13676,7 +13676,7 @@ pub fn svqrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13685,7 +13685,7 @@ pub fn svqrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13694,7 +13694,7 @@ pub fn svqrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13707,7 +13707,7 @@ pub fn svqrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13716,7 +13716,7 @@ pub fn svqrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13725,7 +13725,7 @@ pub fn svqrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqrshl))] @@ -13734,7 +13734,7 @@ pub fn svqrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13743,7 +13743,7 @@ pub fn svqrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13752,7 +13752,7 @@ pub fn svqrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13765,7 +13765,7 @@ pub fn svqrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13774,7 +13774,7 @@ pub fn svqrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13783,7 +13783,7 @@ pub fn svqrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13792,7 +13792,7 @@ pub fn svqrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13801,7 +13801,7 @@ pub fn svqrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13810,7 +13810,7 @@ pub fn svqrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(sqrshl))] @@ -13823,7 +13823,7 @@ pub fn svqrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13832,7 +13832,7 @@ pub fn svqrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13841,7 +13841,7 @@ pub fn svqrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13850,7 +13850,7 @@ pub fn svqrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13859,7 +13859,7 @@ pub fn svqrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshl))] @@ -13868,7 +13868,7 @@ pub fn svqrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13881,7 +13881,7 @@ pub fn svqrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13890,7 +13890,7 @@ pub fn svqrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13899,7 +13899,7 @@ pub fn svqrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uqrshl))] @@ -13908,7 +13908,7 @@ pub fn svqrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13917,7 +13917,7 @@ pub fn svqrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13926,7 +13926,7 @@ pub fn svqrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13939,7 +13939,7 @@ pub fn svqrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_ } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13948,7 +13948,7 @@ pub fn svqrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13957,7 +13957,7 @@ pub fn svqrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_ } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13966,7 +13966,7 @@ pub fn svqrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13975,7 +13975,7 @@ pub fn svqrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_ } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -13984,7 +13984,7 @@ pub fn svqrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(uqrshl))] @@ -13997,7 +13997,7 @@ pub fn svqrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_ } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14006,7 +14006,7 @@ pub fn svqrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14015,7 +14015,7 @@ pub fn svqrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_ } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14024,7 +14024,7 @@ pub fn svqrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14033,7 +14033,7 @@ pub fn svqrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_ } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14042,7 +14042,7 @@ pub fn svqrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14055,7 +14055,7 @@ pub fn svqrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_ } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14064,7 +14064,7 @@ pub fn svqrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14073,7 +14073,7 @@ pub fn svqrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_ } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(uqrshl))] @@ -14082,7 +14082,7 @@ pub fn svqrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14091,7 +14091,7 @@ pub fn svqrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_ } #[doc = "Saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshl[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshl))] @@ -14100,7 +14100,7 @@ pub fn svqrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Saturating rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] @@ -14117,7 +14117,7 @@ pub fn svqrshrnb_n_s16(op1: svint16_t) -> svint8_t { } #[doc = "Saturating rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] @@ -14134,7 +14134,7 @@ pub fn svqrshrnb_n_s32(op1: svint32_t) -> svint16_t { } #[doc = "Saturating rounding shift right narrow (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrnb, IMM2 = 1))] @@ -14151,7 +14151,7 @@ pub fn svqrshrnb_n_s64(op1: svint64_t) -> svint32_t { } #[doc = "Saturating rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] @@ -14168,7 +14168,7 @@ pub fn svqrshrnb_n_u16(op1: svuint16_t) -> svuint8_t { } #[doc = "Saturating rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] @@ -14185,7 +14185,7 @@ pub fn svqrshrnb_n_u32(op1: svuint32_t) -> svuint16_t { } #[doc = "Saturating rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshrnb, IMM2 = 1))] @@ -14202,7 +14202,7 @@ pub fn svqrshrnb_n_u64(op1: svuint64_t) -> svuint32_t { } #[doc = "Saturating rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] @@ -14219,7 +14219,7 @@ pub fn svqrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint } #[doc = "Saturating rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] @@ -14236,7 +14236,7 @@ pub fn svqrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svin } #[doc = "Saturating rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrnt, IMM2 = 1))] @@ -14253,7 +14253,7 @@ pub fn svqrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svin } #[doc = "Saturating rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] @@ -14270,7 +14270,7 @@ pub fn svqrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svu } #[doc = "Saturating rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] @@ -14287,7 +14287,7 @@ pub fn svqrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> sv } #[doc = "Saturating rounding shift right 
narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrnt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqrshrnt, IMM2 = 1))] @@ -14304,7 +14304,7 @@ pub fn svqrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> sv } #[doc = "Saturating rounding shift right unsigned narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] @@ -14321,7 +14321,7 @@ pub fn svqrshrunb_n_s16(op1: svint16_t) -> svuint8_t { } #[doc = "Saturating rounding shift right unsigned narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] @@ -14338,7 +14338,7 @@ pub fn svqrshrunb_n_s32(op1: svint32_t) -> svuint16_t { } #[doc = "Saturating rounding shift right unsigned narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrunb, IMM2 = 1))] @@ -14355,7 +14355,7 @@ pub fn svqrshrunb_n_s64(op1: svint64_t) -> svuint32_t { } #[doc = "Saturating rounding shift right unsigned narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s16])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] @@ -14372,7 +14372,7 @@ pub fn svqrshrunt_n_s16(even: svuint8_t, op1: svint16_t) -> svu } #[doc = "Saturating rounding shift right unsigned narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] @@ -14389,7 +14389,7 @@ pub fn svqrshrunt_n_s32(even: svuint16_t, op1: svint32_t) -> sv } #[doc = "Saturating rounding shift right unsigned narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqrshrunt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqrshrunt, IMM2 = 1))] @@ -14406,7 +14406,7 @@ pub fn svqrshrunt_n_s64(even: svuint32_t, op1: svint64_t) -> sv } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14419,7 +14419,7 @@ pub fn svqshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14428,7 +14428,7 @@ pub fn svqshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> 
svint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14437,7 +14437,7 @@ pub fn svqshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14446,7 +14446,7 @@ pub fn svqshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14455,7 +14455,7 @@ pub fn svqshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14464,7 +14464,7 @@ pub fn svqshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqshl))] @@ -14477,7 +14477,7 @@ pub fn svqshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14486,7 +14486,7 @@ pub fn svqshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14495,7 +14495,7 @@ pub fn svqshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14504,7 +14504,7 @@ pub fn svqshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14513,7 +14513,7 @@ pub fn svqshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s16]_z)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14522,7 +14522,7 @@ pub fn svqshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14535,7 +14535,7 @@ pub fn svqshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14544,7 +14544,7 @@ pub fn svqshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14553,7 +14553,7 @@ pub fn svqshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14562,7 +14562,7 @@ pub fn svqshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = 
"Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14571,7 +14571,7 @@ pub fn svqshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14580,7 +14580,7 @@ pub fn svqshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14593,7 +14593,7 @@ pub fn svqshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14602,7 +14602,7 @@ pub fn svqshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqshl))] @@ -14611,7 +14611,7 @@ pub fn svqshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14620,7 +14620,7 @@ pub fn svqshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14629,7 +14629,7 @@ pub fn svqshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshl))] @@ -14638,7 +14638,7 @@ pub fn svqshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14651,7 +14651,7 @@ pub fn svqshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_m)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14660,7 +14660,7 @@ pub fn svqshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14669,7 +14669,7 @@ pub fn svqshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14678,7 +14678,7 @@ pub fn svqshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14687,7 +14687,7 @@ pub fn svqshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14696,7 +14696,7 @@ pub fn svqshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Saturating shift 
left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14709,7 +14709,7 @@ pub fn svqshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14718,7 +14718,7 @@ pub fn svqshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14727,7 +14727,7 @@ pub fn svqshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14736,7 +14736,7 @@ pub fn svqshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uqshl))] @@ -14745,7 +14745,7 @@ pub fn svqshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14754,7 +14754,7 @@ pub fn svqshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14767,7 +14767,7 @@ pub fn svqshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14776,7 +14776,7 @@ pub fn svqshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14785,7 +14785,7 @@ pub fn svqshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_x)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14794,7 +14794,7 @@ pub fn svqshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14803,7 +14803,7 @@ pub fn svqshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14812,7 +14812,7 @@ pub fn svqshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14825,7 +14825,7 @@ pub fn svqshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14834,7 +14834,7 @@ pub fn svqshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = 
"Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14843,7 +14843,7 @@ pub fn svqshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14852,7 +14852,7 @@ pub fn svqshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14861,7 +14861,7 @@ pub fn svqshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t } #[doc = "Saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshl[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshl))] @@ -14870,7 +14870,7 @@ pub fn svqshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14884,7 +14884,7 @@ pub fn svqshlu_n_s8_m(pg: svbool_t, op1: svint8_t) -> svuint8_t } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14893,7 +14893,7 @@ pub fn svqshlu_n_s8_x(pg: svbool_t, op1: svint8_t) -> svuint8_t } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14902,7 +14902,7 @@ pub fn svqshlu_n_s8_z(pg: svbool_t, op1: svint8_t) -> svuint8_t } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14916,7 +14916,7 @@ pub fn svqshlu_n_s16_m(pg: svbool_t, op1: svint16_t) -> svuint1 } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14925,7 +14925,7 @@ pub fn svqshlu_n_s16_x(pg: svbool_t, op1: svint16_t) -> svuint1 } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14934,7 +14934,7 @@ pub fn svqshlu_n_s16_z(pg: svbool_t, op1: svint16_t) -> svuint1 } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14948,7 +14948,7 @@ pub fn svqshlu_n_s32_m(pg: svbool_t, op1: svint32_t) -> svuint3 } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14957,7 +14957,7 @@ pub fn svqshlu_n_s32_x(pg: svbool_t, op1: svint32_t) -> svuint3 } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14966,7 +14966,7 @@ pub fn svqshlu_n_s32_z(pg: svbool_t, op1: svint32_t) -> svuint3 } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqshlu, IMM2 = 0))] @@ -14980,7 +14980,7 @@ pub fn svqshlu_n_s64_m(pg: svbool_t, op1: svint64_t) -> svuint6 } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14989,7 +14989,7 @@ pub fn svqshlu_n_s64_x(pg: svbool_t, op1: svint64_t) -> svuint6 } #[doc = "Saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshlu[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshlu, IMM2 = 0))] @@ -14998,7 +14998,7 @@ pub fn svqshlu_n_s64_z(pg: svbool_t, op1: svint64_t) -> svuint6 } #[doc = "Saturating shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] @@ -15015,7 +15015,7 @@ pub fn svqshrnb_n_s16(op1: svint16_t) -> svint8_t { } #[doc = "Saturating shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] @@ -15032,7 +15032,7 @@ pub fn svqshrnb_n_s32(op1: svint32_t) -> svint16_t { } #[doc = "Saturating shift right narrow (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrnb, IMM2 = 1))] @@ -15049,7 +15049,7 @@ pub fn svqshrnb_n_s64(op1: svint64_t) -> svint32_t { } #[doc = "Saturating shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] @@ -15066,7 +15066,7 @@ pub fn svqshrnb_n_u16(op1: svuint16_t) -> svuint8_t { } #[doc = "Saturating shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] @@ -15083,7 +15083,7 @@ pub fn svqshrnb_n_u32(op1: svuint32_t) -> svuint16_t { } #[doc = "Saturating shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshrnb, IMM2 = 1))] @@ -15100,7 +15100,7 @@ pub fn svqshrnb_n_u64(op1: svuint64_t) -> svuint32_t { } #[doc = "Saturating shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrnt, 
IMM2 = 1))] @@ -15117,7 +15117,7 @@ pub fn svqshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8 } #[doc = "Saturating shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] @@ -15134,7 +15134,7 @@ pub fn svqshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint } #[doc = "Saturating shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrnt, IMM2 = 1))] @@ -15151,7 +15151,7 @@ pub fn svqshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint } #[doc = "Saturating shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))] @@ -15168,7 +15168,7 @@ pub fn svqshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svui } #[doc = "Saturating shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))] @@ -15185,7 +15185,7 @@ pub fn svqshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svu } #[doc = "Saturating shift right narrow (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrnt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqshrnt, IMM2 = 1))] @@ -15202,7 +15202,7 @@ pub fn svqshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svu } #[doc = "Saturating shift right unsigned narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))] @@ -15219,7 +15219,7 @@ pub fn svqshrunb_n_s16(op1: svint16_t) -> svuint8_t { } #[doc = "Saturating shift right unsigned narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))] @@ -15236,7 +15236,7 @@ pub fn svqshrunb_n_s32(op1: svint32_t) -> svuint16_t { } #[doc = "Saturating shift right unsigned narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrunb, IMM2 = 1))] @@ -15253,7 +15253,7 @@ pub fn svqshrunb_n_s64(op1: svint64_t) -> svuint32_t { } #[doc = "Saturating shift right unsigned narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))] @@ -15270,7 +15270,7 @@ pub fn svqshrunt_n_s16(even: svuint8_t, op1: svint16_t) -> svui } #[doc = "Saturating shift right unsigned narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))] @@ -15287,7 +15287,7 @@ pub fn svqshrunt_n_s32(even: svuint16_t, op1: svint32_t) -> svu } #[doc = "Saturating shift right unsigned narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqshrunt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqshrunt, IMM2 = 1))] @@ -15304,7 +15304,7 @@ pub fn svqshrunt_n_s64(even: svuint32_t, op1: svint64_t) -> svu } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15317,7 +15317,7 @@ pub fn svqsub_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15326,7 +15326,7 @@ pub fn svqsub_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15335,7 +15335,7 @@ pub fn svqsub_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15344,7 +15344,7 @@ pub fn svqsub_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15353,7 +15353,7 @@ pub fn svqsub_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15362,7 +15362,7 @@ pub fn svqsub_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15375,7 +15375,7 @@ pub fn svqsub_s16_m(pg: 
svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15384,7 +15384,7 @@ pub fn svqsub_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15393,7 +15393,7 @@ pub fn svqsub_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15402,7 +15402,7 @@ pub fn svqsub_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15411,7 +15411,7 @@ pub fn svqsub_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15420,7 +15420,7 @@ pub fn svqsub_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15433,7 +15433,7 @@ pub fn svqsub_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15442,7 +15442,7 @@ pub fn svqsub_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15451,7 +15451,7 @@ pub fn svqsub_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15460,7 +15460,7 @@ pub fn svqsub_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15469,7 +15469,7 @@ pub fn svqsub_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15478,7 +15478,7 @@ pub fn svqsub_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15491,7 +15491,7 @@ pub fn svqsub_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15500,7 +15500,7 @@ pub fn svqsub_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15509,7 +15509,7 @@ pub 
fn svqsub_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15518,7 +15518,7 @@ pub fn svqsub_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15527,7 +15527,7 @@ pub fn svqsub_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsub))] @@ -15536,7 +15536,7 @@ pub fn svqsub_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15549,7 +15549,7 @@ pub fn svqsub_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15558,7 +15558,7 @@ pub fn svqsub_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15567,7 +15567,7 @@ pub fn svqsub_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15576,7 +15576,7 @@ pub fn svqsub_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15585,7 +15585,7 @@ pub fn svqsub_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15594,7 +15594,7 @@ pub fn svqsub_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15607,7 +15607,7 @@ pub fn svqsub_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15616,7 +15616,7 @@ pub fn svqsub_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15625,7 +15625,7 @@ pub fn svqsub_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15634,7 +15634,7 @@ pub fn svqsub_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15643,7 +15643,7 @@ 
pub fn svqsub_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16_ } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15652,7 +15652,7 @@ pub fn svqsub_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15665,7 +15665,7 @@ pub fn svqsub_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15674,7 +15674,7 @@ pub fn svqsub_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15683,7 +15683,7 @@ pub fn svqsub_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15692,7 +15692,7 @@ pub fn svqsub_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15701,7 +15701,7 @@ pub fn svqsub_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32_ } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15710,7 +15710,7 @@ pub fn svqsub_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15723,7 +15723,7 @@ pub fn svqsub_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15732,7 +15732,7 @@ pub fn svqsub_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15741,7 +15741,7 @@ pub fn svqsub_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15750,7 +15750,7 @@ pub fn svqsub_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15759,7 +15759,7 @@ pub fn svqsub_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64_ } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsub[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsub))] @@ -15768,7 +15768,7 @@ pub fn svqsub_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15781,7 
+15781,7 @@ pub fn svqsubr_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15790,7 +15790,7 @@ pub fn svqsubr_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15799,7 +15799,7 @@ pub fn svqsubr_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15808,7 +15808,7 @@ pub fn svqsubr_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15817,7 +15817,7 @@ pub fn svqsubr_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s8]_z)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15826,7 +15826,7 @@ pub fn svqsubr_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15839,7 +15839,7 @@ pub fn svqsubr_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15848,7 +15848,7 @@ pub fn svqsubr_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15857,7 +15857,7 @@ pub fn svqsubr_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15866,7 +15866,7 @@ pub fn svqsubr_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } 
#[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15875,7 +15875,7 @@ pub fn svqsubr_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15884,7 +15884,7 @@ pub fn svqsubr_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15897,7 +15897,7 @@ pub fn svqsubr_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15906,7 +15906,7 @@ pub fn svqsubr_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15915,7 +15915,7 @@ pub fn svqsubr_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15924,7 +15924,7 @@ pub fn svqsubr_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15933,7 +15933,7 @@ pub fn svqsubr_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15942,7 +15942,7 @@ pub fn svqsubr_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15955,7 +15955,7 @@ pub fn svqsubr_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15964,7 +15964,7 @@ pub fn svqsubr_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15973,7 +15973,7 @@ pub fn svqsubr_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15982,7 +15982,7 @@ pub fn svqsubr_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqsubr))] @@ -15991,7 +15991,7 @@ pub fn svqsubr_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(sqsubr))] @@ -16000,7 +16000,7 @@ pub fn svqsubr_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16013,7 +16013,7 @@ pub fn svqsubr_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16022,7 +16022,7 @@ pub fn svqsubr_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16031,7 +16031,7 @@ pub fn svqsubr_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16040,7 +16040,7 @@ pub fn svqsubr_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16049,7 +16049,7 @@ pub fn svqsubr_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16058,7 +16058,7 @@ pub fn svqsubr_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16071,7 +16071,7 @@ pub fn svqsubr_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16 } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16080,7 +16080,7 @@ pub fn svqsubr_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(uqsubr))] @@ -16089,7 +16089,7 @@ pub fn svqsubr_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16 } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16098,7 +16098,7 @@ pub fn svqsubr_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16107,7 +16107,7 @@ pub fn svqsubr_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16 } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16116,7 +16116,7 @@ pub fn svqsubr_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16129,7 +16129,7 @@ pub fn svqsubr_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32 } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16138,7 +16138,7 @@ pub fn svqsubr_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16147,7 +16147,7 @@ pub fn svqsubr_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32 } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16156,7 +16156,7 @@ pub fn svqsubr_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16165,7 +16165,7 @@ pub fn svqsubr_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32 } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(uqsubr))] @@ -16174,7 +16174,7 @@ pub fn svqsubr_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16187,7 +16187,7 @@ pub fn svqsubr_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64 } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16196,7 +16196,7 @@ pub fn svqsubr_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16205,7 +16205,7 @@ pub fn svqsubr_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64 } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16214,7 +16214,7 @@ pub fn svqsubr_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16223,7 +16223,7 @@ pub fn svqsubr_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64 } #[doc = "Saturating subtract reversed"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqsubr[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqsubr))] @@ -16232,7 +16232,7 @@ pub fn svqsubr_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Saturating extract narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtnb))] @@ -16245,7 +16245,7 @@ pub fn svqxtnb_s16(op: svint16_t) -> svint8_t { } #[doc = "Saturating extract narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtnb))] @@ -16258,7 +16258,7 @@ pub fn svqxtnb_s32(op: svint32_t) -> svint16_t { } #[doc = "Saturating extract narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtnb))] @@ -16271,7 +16271,7 @@ pub 
fn svqxtnb_s64(op: svint64_t) -> svint32_t { } #[doc = "Saturating extract narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqxtnb))] @@ -16284,7 +16284,7 @@ pub fn svqxtnb_u16(op: svuint16_t) -> svuint8_t { } #[doc = "Saturating extract narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqxtnb))] @@ -16297,7 +16297,7 @@ pub fn svqxtnb_u32(op: svuint32_t) -> svuint16_t { } #[doc = "Saturating extract narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqxtnb))] @@ -16310,7 +16310,7 @@ pub fn svqxtnb_u64(op: svuint64_t) -> svuint32_t { } #[doc = "Saturating extract narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtnt))] @@ -16323,7 +16323,7 @@ pub fn svqxtnt_s16(even: svint8_t, op: svint16_t) -> svint8_t { } #[doc = "Saturating extract narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(sqxtnt))] @@ -16336,7 +16336,7 @@ pub fn svqxtnt_s32(even: svint16_t, op: svint32_t) -> svint16_t { } #[doc = "Saturating extract narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtnt))] @@ -16349,7 +16349,7 @@ pub fn svqxtnt_s64(even: svint32_t, op: svint64_t) -> svint32_t { } #[doc = "Saturating extract narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqxtnt))] @@ -16362,7 +16362,7 @@ pub fn svqxtnt_u16(even: svuint8_t, op: svuint16_t) -> svuint8_t { } #[doc = "Saturating extract narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqxtnt))] @@ -16375,7 +16375,7 @@ pub fn svqxtnt_u32(even: svuint16_t, op: svuint32_t) -> svuint16_t { } #[doc = "Saturating extract narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtnt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uqxtnt))] @@ -16388,7 +16388,7 @@ pub fn svqxtnt_u64(even: svuint32_t, op: svuint64_t) -> svuint32_t { } #[doc = "Saturating extract unsigned narrow (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtunb))] @@ -16404,7 +16404,7 @@ pub fn svqxtunb_s16(op: svint16_t) -> svuint8_t { } #[doc = "Saturating extract unsigned narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtunb))] @@ -16420,7 +16420,7 @@ pub fn svqxtunb_s32(op: svint32_t) -> svuint16_t { } #[doc = "Saturating extract unsigned narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtunb))] @@ -16436,7 +16436,7 @@ pub fn svqxtunb_s64(op: svint64_t) -> svuint32_t { } #[doc = "Saturating extract unsigned narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtunt))] @@ -16452,7 +16452,7 @@ pub fn svqxtunt_s16(even: svuint8_t, op: svint16_t) -> svuint8_t { } #[doc = "Saturating extract unsigned narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtunt))] @@ -16468,7 +16468,7 @@ pub 
fn svqxtunt_s32(even: svuint16_t, op: svint32_t) -> svuint16_t { } #[doc = "Saturating extract unsigned narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svqxtunt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sqxtunt))] @@ -16484,7 +16484,7 @@ pub fn svqxtunt_s64(even: svuint32_t, op: svint64_t) -> svuint32_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16500,7 +16500,7 @@ pub fn svraddhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16509,7 +16509,7 @@ pub fn svraddhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16525,7 +16525,7 @@ pub fn svraddhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s32])"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16534,7 +16534,7 @@ pub fn svraddhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16550,7 +16550,7 @@ pub fn svraddhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16559,7 +16559,7 @@ pub fn svraddhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16568,7 +16568,7 @@ pub fn svraddhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16577,7 +16577,7 @@ pub fn svraddhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { } #[doc = 
"Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16586,7 +16586,7 @@ pub fn svraddhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16595,7 +16595,7 @@ pub fn svraddhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16604,7 +16604,7 @@ pub fn svraddhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { } #[doc = "Rounding add narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnb))] @@ -16613,7 +16613,7 @@ pub fn svraddhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16629,7 +16629,7 @@ pub fn svraddhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16638,7 +16638,7 @@ pub fn svraddhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16654,7 +16654,7 @@ pub fn svraddhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16 } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16663,7 +16663,7 @@ pub fn svraddhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16679,7 +16679,7 @@ pub fn svraddhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32 } #[doc = "Rounding add 
narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16688,7 +16688,7 @@ pub fn svraddhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16697,7 +16697,7 @@ pub fn svraddhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuin } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16706,7 +16706,7 @@ pub fn svraddhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16715,7 +16715,7 @@ pub fn svraddhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svui } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16724,7 +16724,7 @@ pub fn svraddhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_ } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16733,7 +16733,7 @@ pub fn svraddhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svui } #[doc = "Rounding add narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svraddhnt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(raddhnt))] @@ -16742,7 +16742,7 @@ pub fn svraddhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_ } #[doc = "Bitwise rotate left by 1 and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-sha3")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rax1))] @@ -16755,7 +16755,7 @@ pub fn svrax1_s64(op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Bitwise rotate left by 1 and exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrax1[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-sha3")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rax1))] @@ -16764,7 +16764,7 @@ pub fn svrax1_u64(op1: svuint64_t, op2: svuint64_t) -> svuint64_t { } #[doc = 
"Reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urecpe))] @@ -16777,7 +16777,7 @@ pub fn svrecpe_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svui } #[doc = "Reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urecpe))] @@ -16786,7 +16786,7 @@ pub fn svrecpe_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrecpe[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urecpe))] @@ -16795,7 +16795,7 @@ pub fn svrecpe_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16808,7 +16808,7 @@ pub fn svrhadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ 
-16817,7 +16817,7 @@ pub fn svrhadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16826,7 +16826,7 @@ pub fn svrhadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16835,7 +16835,7 @@ pub fn svrhadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16844,7 +16844,7 @@ pub fn svrhadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16853,7 +16853,7 @@ pub fn svrhadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16866,7 +16866,7 @@ pub fn svrhadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16875,7 +16875,7 @@ pub fn svrhadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16884,7 +16884,7 @@ pub fn svrhadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16893,7 +16893,7 @@ pub fn svrhadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16902,7 +16902,7 @@ pub fn svrhadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Rounding halving add"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16911,7 +16911,7 @@ pub fn svrhadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16924,7 +16924,7 @@ pub fn svrhadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16933,7 +16933,7 @@ pub fn svrhadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16942,7 +16942,7 @@ pub fn svrhadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16951,7 
+16951,7 @@ pub fn svrhadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16960,7 +16960,7 @@ pub fn svrhadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16969,7 +16969,7 @@ pub fn svrhadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16982,7 +16982,7 @@ pub fn svrhadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -16991,7 +16991,7 @@ pub fn svrhadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_x)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -17000,7 +17000,7 @@ pub fn svrhadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -17009,7 +17009,7 @@ pub fn svrhadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -17018,7 +17018,7 @@ pub fn svrhadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srhadd))] @@ -17027,7 +17027,7 @@ pub fn svrhadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17040,7 +17040,7 @@ pub fn svrhadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Rounding halving add"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17049,7 +17049,7 @@ pub fn svrhadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17058,7 +17058,7 @@ pub fn svrhadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17067,7 +17067,7 @@ pub fn svrhadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17076,7 +17076,7 @@ pub fn svrhadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svuint8_t) -> svuint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ 
-17085,7 +17085,7 @@ pub fn svrhadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: u8) -> svuint8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17098,7 +17098,7 @@ pub fn svrhadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16 } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17107,7 +17107,7 @@ pub fn svrhadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17116,7 +17116,7 @@ pub fn svrhadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16 } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17125,7 +17125,7 @@ pub fn svrhadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u16]_z)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17134,7 +17134,7 @@ pub fn svrhadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svuint16_t) -> svuint16 } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17143,7 +17143,7 @@ pub fn svrhadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: u16) -> svuint16_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17156,7 +17156,7 @@ pub fn svrhadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32 } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17165,7 +17165,7 @@ pub fn svrhadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17174,7 +17174,7 @@ pub fn svrhadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32 } #[doc = "Rounding halving add"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17183,7 +17183,7 @@ pub fn svrhadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17192,7 +17192,7 @@ pub fn svrhadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svuint32_t) -> svuint32 } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17201,7 +17201,7 @@ pub fn svrhadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: u32) -> svuint32_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17214,7 +17214,7 @@ pub fn svrhadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64 } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(urhadd))] @@ -17223,7 +17223,7 @@ pub fn svrhadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17232,7 +17232,7 @@ pub fn svrhadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64 } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17241,7 +17241,7 @@ pub fn svrhadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17250,7 +17250,7 @@ pub fn svrhadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svuint64_t) -> svuint64 } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrhadd[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urhadd))] @@ -17259,7 +17259,7 @@ pub fn svrhadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: u64) -> svuint64_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_m)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17272,7 +17272,7 @@ pub fn svrshl_s8_m(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17281,7 +17281,7 @@ pub fn svrshl_n_s8_m(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17290,7 +17290,7 @@ pub fn svrshl_s8_x(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17299,7 +17299,7 @@ pub fn svrshl_n_s8_x(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17308,7 +17308,7 @@ pub fn svrshl_s8_z(pg: svbool_t, op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Rounding shift left"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17317,7 +17317,7 @@ pub fn svrshl_n_s8_z(pg: svbool_t, op1: svint8_t, op2: i8) -> svint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17330,7 +17330,7 @@ pub fn svrshl_s16_m(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17339,7 +17339,7 @@ pub fn svrshl_n_s16_m(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17348,7 +17348,7 @@ pub fn svrshl_s16_x(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17357,7 +17357,7 
@@ pub fn svrshl_n_s16_x(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17366,7 +17366,7 @@ pub fn svrshl_s16_z(pg: svbool_t, op1: svint16_t, op2: svint16_t) -> svint16_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17375,7 +17375,7 @@ pub fn svrshl_n_s16_z(pg: svbool_t, op1: svint16_t, op2: i16) -> svint16_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17388,7 +17388,7 @@ pub fn svrshl_s32_m(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17397,7 +17397,7 @@ pub fn svrshl_n_s32_m(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17406,7 +17406,7 @@ pub fn svrshl_s32_x(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17415,7 +17415,7 @@ pub fn svrshl_n_s32_x(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17424,7 +17424,7 @@ pub fn svrshl_s32_z(pg: svbool_t, op1: svint32_t, op2: svint32_t) -> svint32_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17433,7 +17433,7 @@ pub fn svrshl_n_s32_z(pg: svbool_t, op1: svint32_t, op2: i32) -> svint32_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17446,7 +17446,7 @@ pub fn svrshl_s64_m(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17455,7 +17455,7 @@ pub fn svrshl_n_s64_m(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17464,7 +17464,7 @@ pub fn svrshl_s64_x(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17473,7 +17473,7 @@ pub fn svrshl_n_s64_x(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17482,7 +17482,7 @@ pub fn svrshl_s64_z(pg: svbool_t, op1: svint64_t, op2: svint64_t) -> svint64_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshl))] @@ -17491,7 +17491,7 @@ 
pub fn svrshl_n_s64_z(pg: svbool_t, op1: svint64_t, op2: i64) -> svint64_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17504,7 +17504,7 @@ pub fn svrshl_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17513,7 +17513,7 @@ pub fn svrshl_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17522,7 +17522,7 @@ pub fn svrshl_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17531,7 +17531,7 @@ pub fn svrshl_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17540,7 +17540,7 @@ pub fn svrshl_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17549,7 +17549,7 @@ pub fn svrshl_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17562,7 +17562,7 @@ pub fn svrshl_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17571,7 +17571,7 @@ pub fn svrshl_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17580,7 +17580,7 @@ pub fn svrshl_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t } #[doc = "Rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17589,7 +17589,7 @@ pub fn svrshl_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17598,7 +17598,7 @@ pub fn svrshl_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_t } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17607,7 +17607,7 @@ pub fn svrshl_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17620,7 +17620,7 @@ pub fn svrshl_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17629,7 +17629,7 
@@ pub fn svrshl_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17638,7 +17638,7 @@ pub fn svrshl_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17647,7 +17647,7 @@ pub fn svrshl_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17656,7 +17656,7 @@ pub fn svrshl_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_t } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17665,7 +17665,7 @@ pub fn svrshl_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17678,7 +17678,7 @@ pub fn svrshl_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17687,7 +17687,7 @@ pub fn svrshl_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17696,7 +17696,7 @@ pub fn svrshl_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17705,7 +17705,7 @@ pub fn svrshl_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17714,7 +17714,7 @@ pub fn svrshl_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_t } #[doc = "Rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshl[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshl))] @@ -17723,7 +17723,7 @@ pub fn svrshl_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17737,7 +17737,7 @@ pub fn svrshr_n_s8_m(pg: svbool_t, op1: svint8_t) -> svint8_t { } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17746,7 +17746,7 @@ pub fn svrshr_n_s8_x(pg: svbool_t, op1: svint8_t) -> svint8_t { } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17755,7 +17755,7 @@ pub fn svrshr_n_s8_z(pg: svbool_t, op1: svint8_t) -> svint8_t { } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17769,7 +17769,7 @@ 
pub fn svrshr_n_s16_m(pg: svbool_t, op1: svint16_t) -> svint16_ } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17778,7 +17778,7 @@ pub fn svrshr_n_s16_x(pg: svbool_t, op1: svint16_t) -> svint16_ } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17787,7 +17787,7 @@ pub fn svrshr_n_s16_z(pg: svbool_t, op1: svint16_t) -> svint16_ } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17801,7 +17801,7 @@ pub fn svrshr_n_s32_m(pg: svbool_t, op1: svint32_t) -> svint32_ } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17810,7 +17810,7 @@ pub fn svrshr_n_s32_x(pg: svbool_t, op1: svint32_t) -> svint32_ } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17819,7 +17819,7 @@ pub fn svrshr_n_s32_z(pg: svbool_t, op1: svint32_t) -> svint32_ } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17833,7 +17833,7 @@ pub fn svrshr_n_s64_m(pg: svbool_t, op1: svint64_t) -> svint64_ } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17842,7 +17842,7 @@ pub fn svrshr_n_s64_x(pg: svbool_t, op1: svint64_t) -> svint64_ } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srshr, IMM2 = 1))] @@ -17851,7 +17851,7 @@ pub fn svrshr_n_s64_z(pg: svbool_t, op1: svint64_t) -> svint64_ } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17865,7 +17865,7 @@ pub fn svrshr_n_u8_m(pg: svbool_t, op1: svuint8_t) -> svuint8_t } #[doc = "Rounding shift right"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17874,7 +17874,7 @@ pub fn svrshr_n_u8_x(pg: svbool_t, op1: svuint8_t) -> svuint8_t } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17883,7 +17883,7 @@ pub fn svrshr_n_u8_z(pg: svbool_t, op1: svuint8_t) -> svuint8_t } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17897,7 +17897,7 @@ pub fn svrshr_n_u16_m(pg: svbool_t, op1: svuint16_t) -> svuint1 } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17906,7 +17906,7 @@ pub fn svrshr_n_u16_x(pg: svbool_t, op1: svuint16_t) -> svuint1 } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17915,7 +17915,7 @@ pub 
fn svrshr_n_u16_z(pg: svbool_t, op1: svuint16_t) -> svuint1 } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17929,7 +17929,7 @@ pub fn svrshr_n_u32_m(pg: svbool_t, op1: svuint32_t) -> svuint3 } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17938,7 +17938,7 @@ pub fn svrshr_n_u32_x(pg: svbool_t, op1: svuint32_t) -> svuint3 } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17947,7 +17947,7 @@ pub fn svrshr_n_u32_z(pg: svbool_t, op1: svuint32_t) -> svuint3 } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17961,7 +17961,7 @@ pub fn svrshr_n_u64_m(pg: svbool_t, op1: svuint64_t) -> svuint6 } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17970,7 +17970,7 @@ pub fn svrshr_n_u64_x(pg: svbool_t, op1: svuint64_t) -> svuint6 } #[doc = "Rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshr[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(urshr, IMM2 = 1))] @@ -17979,7 +17979,7 @@ pub fn svrshr_n_u64_z(pg: svbool_t, op1: svuint64_t) -> svuint6 } #[doc = "Rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] @@ -17993,7 +17993,7 @@ pub fn svrshrnb_n_s16(op1: svint16_t) -> svint8_t { } #[doc = "Rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] @@ -18007,7 +18007,7 @@ pub fn svrshrnb_n_s32(op1: svint32_t) -> svint16_t { } #[doc = "Rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] @@ -18021,7 +18021,7 @@ pub fn svrshrnb_n_s64(op1: svint64_t) -> svint32_t { } #[doc = "Rounding shift right narrow (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] @@ -18031,7 +18031,7 @@ pub fn svrshrnb_n_u16(op1: svuint16_t) -> svuint8_t { } #[doc = "Rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] @@ -18041,7 +18041,7 @@ pub fn svrshrnb_n_u32(op1: svuint32_t) -> svuint16_t { } #[doc = "Rounding shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnb, IMM2 = 1))] @@ -18051,7 +18051,7 @@ pub fn svrshrnb_n_u64(op1: svuint64_t) -> svuint32_t { } #[doc = "Rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] @@ -18065,7 +18065,7 @@ pub fn svrshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8 } #[doc = "Rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnt, IMM2 
= 1))] @@ -18079,7 +18079,7 @@ pub fn svrshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint } #[doc = "Rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] @@ -18093,7 +18093,7 @@ pub fn svrshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint } #[doc = "Rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] @@ -18103,7 +18103,7 @@ pub fn svrshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svui } #[doc = "Rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] @@ -18113,7 +18113,7 @@ pub fn svrshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svu } #[doc = "Rounding shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrshrnt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rshrnt, IMM2 = 1))] @@ -18123,7 +18123,7 @@ pub fn svrshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svu } #[doc = "Reciprocal square root estimate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ursqrte))] @@ -18139,7 +18139,7 @@ pub fn svrsqrte_u32_m(inactive: svuint32_t, pg: svbool_t, op: svuint32_t) -> svu } #[doc = "Reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ursqrte))] @@ -18148,7 +18148,7 @@ pub fn svrsqrte_u32_x(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsqrte[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ursqrte))] @@ -18157,7 +18157,7 @@ pub fn svrsqrte_u32_z(pg: svbool_t, op: svuint32_t) -> svuint32_t { } #[doc = "Rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] @@ -18171,7 +18171,7 @@ pub fn svrsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(srsra, IMM3 = 1))] @@ -18185,7 +18185,7 @@ pub fn svrsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_ } #[doc = "Rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] @@ -18199,7 +18199,7 @@ pub fn svrsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_ } #[doc = "Rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(srsra, IMM3 = 1))] @@ -18213,7 +18213,7 @@ pub fn svrsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_ } #[doc = "Rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] @@ -18227,7 +18227,7 @@ pub fn svrsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t } #[doc = "Rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] @@ -18241,7 +18241,7 @@ pub fn svrsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint } #[doc = "Rounding shift right and accumulate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] @@ -18255,7 +18255,7 @@ pub fn svrsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint } #[doc = "Rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsra[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ursra, IMM3 = 1))] @@ -18269,7 +18269,7 @@ pub fn svrsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18285,7 +18285,7 @@ pub fn svrsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18294,7 +18294,7 @@ pub fn svrsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(rsubhnb))] @@ -18310,7 +18310,7 @@ pub fn svrsubhnb_s32(op1: svint32_t, op2: svint32_t) -> svint16_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18319,7 +18319,7 @@ pub fn svrsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18335,7 +18335,7 @@ pub fn svrsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18344,7 +18344,7 @@ pub fn svrsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18353,7 +18353,7 @@ pub fn svrsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18362,7 +18362,7 @@ pub fn svrsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18371,7 +18371,7 @@ pub fn svrsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18380,7 +18380,7 @@ pub fn svrsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18389,7 +18389,7 @@ pub fn svrsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { } #[doc = "Rounding subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(rsubhnb))] @@ -18398,7 +18398,7 @@ pub fn svrsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18414,7 +18414,7 @@ pub fn svrsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18423,7 +18423,7 @@ pub fn svrsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18439,7 +18439,7 @@ pub fn svrsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16 } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18448,7 +18448,7 @@ pub fn svrsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { } #[doc = "Rounding subtract narrow high part (top)"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18464,7 +18464,7 @@ pub fn svrsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32 } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18473,7 +18473,7 @@ pub fn svrsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18482,7 +18482,7 @@ pub fn svrsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuin } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18491,7 +18491,7 @@ pub fn svrsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18500,7 +18500,7 @@ pub fn svrsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svui } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18509,7 +18509,7 @@ pub fn svrsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_ } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18518,7 +18518,7 @@ pub fn svrsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svui } #[doc = "Rounding subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svrsubhnt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(rsubhnt))] @@ -18527,7 +18527,7 @@ pub fn svrsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_ } #[doc = "Subtract with borrow long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbclb))] @@ -18540,7 +18540,7 @@ pub fn svsbclb_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> 
svuint3 } #[doc = "Subtract with borrow long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbclb))] @@ -18549,7 +18549,7 @@ pub fn svsbclb_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Subtract with borrow long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbclb))] @@ -18562,7 +18562,7 @@ pub fn svsbclb_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint6 } #[doc = "Subtract with borrow long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbclb))] @@ -18571,7 +18571,7 @@ pub fn svsbclb_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Subtract with borrow long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbclt))] @@ -18584,7 +18584,7 @@ pub fn svsbclt_u32(op1: svuint32_t, op2: svuint32_t, op3: svuint32_t) -> svuint3 } #[doc = "Subtract with borrow long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbclt))] @@ -18593,7 +18593,7 @@ pub fn svsbclt_n_u32(op1: svuint32_t, op2: svuint32_t, op3: u32) -> svuint32_t { } #[doc = "Subtract with borrow long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbclt))] @@ -18606,7 +18606,7 @@ pub fn svsbclt_u64(op1: svuint64_t, op2: svuint64_t, op3: svuint64_t) -> svuint6 } #[doc = "Subtract with borrow long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsbclt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sbclt))] @@ -18615,7 +18615,7 @@ pub fn svsbclt_n_u64(op1: svuint64_t, op2: svuint64_t, op3: u64) -> svuint64_t { } #[doc = "Shift left long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] @@ -18629,7 +18629,7 @@ pub fn svshllb_n_s16(op1: svint8_t) -> svint16_t { } #[doc = "Shift left long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] @@ -18643,7 +18643,7 @@ pub fn svshllb_n_s32(op1: svint16_t) -> svint32_t { } #[doc = "Shift left long (bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllb, IMM2 = 0))] @@ -18657,7 +18657,7 @@ pub fn svshllb_n_s64(op1: svint32_t) -> svint64_t { } #[doc = "Shift left long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] @@ -18671,7 +18671,7 @@ pub fn svshllb_n_u16(op1: svuint8_t) -> svuint16_t { } #[doc = "Shift left long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] @@ -18685,7 +18685,7 @@ pub fn svshllb_n_u32(op1: svuint16_t) -> svuint32_t { } #[doc = "Shift left long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllb, IMM2 = 0))] @@ -18699,7 +18699,7 @@ pub fn svshllb_n_u64(op1: svuint32_t) -> svuint64_t { } #[doc = "Shift left long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))] @@ -18713,7 +18713,7 @@ pub fn svshllt_n_s16(op1: 
svint8_t) -> svint16_t { } #[doc = "Shift left long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))] @@ -18727,7 +18727,7 @@ pub fn svshllt_n_s32(op1: svint16_t) -> svint32_t { } #[doc = "Shift left long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sshllt, IMM2 = 0))] @@ -18741,7 +18741,7 @@ pub fn svshllt_n_s64(op1: svint32_t) -> svint64_t { } #[doc = "Shift left long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))] @@ -18755,7 +18755,7 @@ pub fn svshllt_n_u16(op1: svuint8_t) -> svuint16_t { } #[doc = "Shift left long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllt, IMM2 = 0))] @@ -18769,7 +18769,7 @@ pub fn svshllt_n_u32(op1: svuint16_t) -> svuint32_t { } #[doc = "Shift left long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshllt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ushllt, IMM2 
= 0))] @@ -18783,7 +18783,7 @@ pub fn svshllt_n_u64(op1: svuint32_t) -> svuint64_t { } #[doc = "Shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] @@ -18797,7 +18797,7 @@ pub fn svshrnb_n_s16(op1: svint16_t) -> svint8_t { } #[doc = "Shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] @@ -18811,7 +18811,7 @@ pub fn svshrnb_n_s32(op1: svint32_t) -> svint16_t { } #[doc = "Shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] @@ -18825,7 +18825,7 @@ pub fn svshrnb_n_s64(op1: svint64_t) -> svint32_t { } #[doc = "Shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] @@ -18835,7 +18835,7 @@ pub fn svshrnb_n_u16(op1: svuint16_t) -> svuint8_t { } #[doc = "Shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] @@ -18845,7 +18845,7 @@ pub fn svshrnb_n_u32(op1: svuint32_t) -> svuint16_t { } #[doc = "Shift right narrow (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnb, IMM2 = 1))] @@ -18855,7 +18855,7 @@ pub fn svshrnb_n_u64(op1: svuint64_t) -> svuint32_t { } #[doc = "Shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] @@ -18869,7 +18869,7 @@ pub fn svshrnt_n_s16(even: svint8_t, op1: svint16_t) -> svint8_ } #[doc = "Shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] @@ -18883,7 +18883,7 @@ pub fn svshrnt_n_s32(even: svint16_t, op1: svint32_t) -> svint1 } #[doc = "Shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] @@ -18897,7 +18897,7 @@ pub fn svshrnt_n_s64(even: svint32_t, op1: svint64_t) -> svint3 } #[doc = "Shift right narrow (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] @@ -18907,7 +18907,7 @@ pub fn svshrnt_n_u16(even: svuint8_t, op1: svuint16_t) -> svuin } #[doc = "Shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] @@ -18917,7 +18917,7 @@ pub fn svshrnt_n_u32(even: svuint16_t, op1: svuint32_t) -> svui } #[doc = "Shift right narrow (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svshrnt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(shrnt, IMM2 = 1))] @@ -18927,7 +18927,7 @@ pub fn svshrnt_n_u64(even: svuint32_t, op1: svuint64_t) -> svui } #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sli, IMM3 = 0))] @@ -18941,7 +18941,7 @@ pub fn svsli_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sli, IMM3 = 0))] @@ -18955,7 +18955,7 @@ pub fn 
svsli_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sli, IMM3 = 0))] @@ -18969,7 +18969,7 @@ pub fn svsli_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sli, IMM3 = 0))] @@ -18983,7 +18983,7 @@ pub fn svsli_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sli, IMM3 = 0))] @@ -18993,7 +18993,7 @@ pub fn svsli_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t } #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sli, IMM3 = 0))] @@ -19003,7 +19003,7 @@ pub fn svsli_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint1 } #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(sli, IMM3 = 0))] @@ -19013,7 +19013,7 @@ pub fn svsli_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint3 } #[doc = "Shift left and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsli[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sli, IMM3 = 0))] @@ -19023,7 +19023,7 @@ pub fn svsli_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint6 } #[doc = "SM4 encryption and decryption"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4e[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-sm4")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sm4e))] @@ -19036,7 +19036,7 @@ pub fn svsm4e_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "SM4 key updates"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsm4ekey[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2,sve2-sm4")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sm4ekey))] @@ -19049,7 +19049,7 @@ pub fn svsm4ekey_u32(op1: svuint32_t, op2: svuint32_t) -> svuint32_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19062,7 +19062,7 @@ pub fn svsqadd_u8_m(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19071,7 +19071,7 @@ pub fn svsqadd_n_u8_m(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19080,7 +19080,7 @@ pub fn svsqadd_u8_x(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19089,7 +19089,7 @@ pub fn svsqadd_n_u8_x(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19098,7 +19098,7 @@ pub fn svsqadd_u8_z(pg: svbool_t, op1: svuint8_t, op2: svint8_t) -> svuint8_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(usqadd))] @@ -19107,7 +19107,7 @@ pub fn svsqadd_n_u8_z(pg: svbool_t, op1: svuint8_t, op2: i8) -> svuint8_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19120,7 +19120,7 @@ pub fn svsqadd_u16_m(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_ } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19129,7 +19129,7 @@ pub fn svsqadd_n_u16_m(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19138,7 +19138,7 @@ pub fn svsqadd_u16_x(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_ } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19147,7 +19147,7 @@ pub fn svsqadd_n_u16_x(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19156,7 +19156,7 @@ pub fn svsqadd_u16_z(pg: svbool_t, op1: svuint16_t, op2: svint16_t) -> svuint16_ } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19165,7 +19165,7 @@ pub fn svsqadd_n_u16_z(pg: svbool_t, op1: svuint16_t, op2: i16) -> svuint16_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19178,7 +19178,7 @@ pub fn svsqadd_u32_m(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_ } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19187,7 +19187,7 @@ pub fn svsqadd_n_u32_m(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19196,7 +19196,7 @@ pub fn svsqadd_u32_x(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_ } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19205,7 +19205,7 @@ pub fn svsqadd_n_u32_x(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19214,7 +19214,7 @@ pub fn svsqadd_u32_z(pg: svbool_t, op1: svuint32_t, op2: svint32_t) -> svuint32_ } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19223,7 +19223,7 @@ pub fn svsqadd_n_u32_z(pg: svbool_t, op1: svuint32_t, op2: i32) -> svuint32_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19236,7 +19236,7 @@ pub fn svsqadd_u64_m(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_ } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19245,7 +19245,7 @@ pub fn svsqadd_n_u64_m(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19254,7 +19254,7 @@ pub fn svsqadd_u64_x(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_ } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19263,7 +19263,7 @@ pub fn svsqadd_n_u64_x(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19272,7 +19272,7 @@ pub fn svsqadd_u64_z(pg: svbool_t, op1: svuint64_t, op2: svint64_t) -> svuint64_ } #[doc = "Saturating add with signed addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsqadd[_n_u64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(usqadd))] @@ -19281,7 +19281,7 @@ pub fn svsqadd_n_u64_z(pg: svbool_t, op1: svuint64_t, op2: i64) -> svuint64_t { } #[doc = "Shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] @@ -19295,7 +19295,7 @@ pub fn svsra_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] @@ -19309,7 +19309,7 @@ pub fn svsra_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] @@ -19323,7 +19323,7 @@ pub fn svsra_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssra, IMM3 = 1))] @@ -19337,7 +19337,7 @@ pub fn svsra_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Shift right and accumulate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usra, IMM3 = 1))] @@ -19351,7 +19351,7 @@ pub fn svsra_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t } #[doc = "Shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usra, IMM3 = 1))] @@ -19365,7 +19365,7 @@ pub fn svsra_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint1 } #[doc = "Shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usra, IMM3 = 1))] @@ -19379,7 +19379,7 @@ pub fn svsra_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint3 } #[doc = "Shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsra[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usra, IMM3 = 1))] @@ -19393,7 +19393,7 @@ pub fn svsra_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint6 } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sri, IMM3 = 1))] @@ -19407,7 +19407,7 @@ pub fn 
svsri_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sri, IMM3 = 1))] @@ -19421,7 +19421,7 @@ pub fn svsri_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sri, IMM3 = 1))] @@ -19435,7 +19435,7 @@ pub fn svsri_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sri, IMM3 = 1))] @@ -19449,7 +19449,7 @@ pub fn svsri_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sri, IMM3 = 1))] @@ -19459,7 +19459,7 @@ pub fn svsri_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue 
= "145052")] #[cfg_attr(test, assert_instr(sri, IMM3 = 1))] @@ -19469,7 +19469,7 @@ pub fn svsri_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint1 } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sri, IMM3 = 1))] @@ -19479,7 +19479,7 @@ pub fn svsri_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint3 } #[doc = "Shift right and insert"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsri[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sri, IMM3 = 1))] @@ -19493,7 +19493,7 @@ pub fn svsri_n_u64(op1: svuint64_t, op2: svuint64_t) -> svuint6 #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19523,7 +19523,7 @@ pub unsafe fn svstnt1_scatter_s64index_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19553,7 +19553,7 @@ pub unsafe fn svstnt1_scatter_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19571,7 +19571,7 @@ pub unsafe fn svstnt1_scatter_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19589,7 +19589,7 @@ 
pub unsafe fn svstnt1_scatter_u64index_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19607,7 +19607,7 @@ pub unsafe fn svstnt1_scatter_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19625,7 +19625,7 @@ pub unsafe fn svstnt1_scatter_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19655,7 +19655,7 @@ pub unsafe fn svstnt1_scatter_s64offset_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19685,7 +19685,7 @@ pub unsafe fn svstnt1_scatter_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19703,7 +19703,7 @@ pub unsafe fn svstnt1_scatter_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the 
address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -19733,7 +19733,7 @@ pub unsafe fn svstnt1_scatter_u32offset_f32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -19763,7 +19763,7 @@ pub unsafe fn svstnt1_scatter_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -19781,7 +19781,7 @@ pub unsafe fn svstnt1_scatter_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19799,7 +19799,7 @@ pub unsafe fn svstnt1_scatter_u64offset_f64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19817,7 +19817,7 @@ pub unsafe fn svstnt1_scatter_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19836,7 +19836,7 @@ pub unsafe fn svstnt1_scatter_u64offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -19850,7 +19850,7 @@ pub unsafe fn svstnt1_scatter_u32base_f32(pg: svbool_t, bases: svuint32_t, data: #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", 
issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -19864,7 +19864,7 @@ pub unsafe fn svstnt1_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data: #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -19878,7 +19878,7 @@ pub unsafe fn svstnt1_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data: #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19892,7 +19892,7 @@ pub unsafe fn svstnt1_scatter_u64base_f64(pg: svbool_t, bases: svuint64_t, data: #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar 
to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19906,7 +19906,7 @@ pub unsafe fn svstnt1_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data: #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19920,7 +19920,7 @@ pub unsafe fn svstnt1_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data: #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -19939,7 +19939,7 @@ pub unsafe fn svstnt1_scatter_u32base_index_f32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -19958,7 +19958,7 @@ pub unsafe fn svstnt1_scatter_u32base_index_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -19977,7 +19977,7 @@ pub unsafe fn svstnt1_scatter_u32base_index_u32( #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -19996,7 +19996,7 @@ pub unsafe fn svstnt1_scatter_u64base_index_f64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -20015,7 +20015,7 @@ pub unsafe fn svstnt1_scatter_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -20034,7 +20034,7 @@ pub unsafe fn svstnt1_scatter_u64base_index_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20065,7 +20065,7 @@ pub unsafe fn svstnt1_scatter_u32base_offset_f32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20096,7 +20096,7 @@ pub unsafe fn svstnt1_scatter_u32base_offset_s32( #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20115,7 +20115,7 @@ pub unsafe fn svstnt1_scatter_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -20146,7 +20146,7 @@ pub unsafe fn svstnt1_scatter_u64base_offset_f64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -20177,7 +20177,7 @@ pub unsafe fn svstnt1_scatter_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1d))] @@ -20195,7 +20195,7 @@ pub unsafe fn svstnt1_scatter_u64base_offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20230,7 +20230,7 @@ pub unsafe fn svstnt1b_scatter_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) 
safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20265,7 +20265,7 @@ pub unsafe fn svstnt1h_scatter_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20300,7 +20300,7 @@ pub unsafe fn svstnt1w_scatter_s64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20318,7 +20318,7 @@ pub unsafe fn svstnt1b_scatter_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20336,7 +20336,7 @@ pub unsafe fn svstnt1h_scatter_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20354,7 +20354,7 @@ pub unsafe fn svstnt1w_scatter_s64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for 
the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20389,7 +20389,7 @@ pub unsafe fn svstnt1b_scatter_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20424,7 +20424,7 @@ pub unsafe fn svstnt1h_scatter_u32offset_s32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20442,7 +20442,7 @@ pub unsafe fn svstnt1b_scatter_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20460,7 +20460,7 @@ pub unsafe fn svstnt1h_scatter_u32offset_u32( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20478,7 +20478,7 @@ pub unsafe fn svstnt1b_scatter_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element 
(governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20496,7 +20496,7 @@ pub unsafe fn svstnt1h_scatter_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20514,7 +20514,7 @@ pub unsafe fn svstnt1w_scatter_u64offset_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20532,7 +20532,7 
@@ pub unsafe fn svstnt1b_scatter_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20550,7 +20550,7 @@ pub unsafe fn svstnt1h_scatter_u64offset_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20569,7 +20569,7 @@ pub unsafe fn svstnt1w_scatter_u64offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20605,7 +20605,7 @@ pub unsafe fn svstnt1b_scatter_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20641,7 +20641,7 @@ pub unsafe fn svstnt1h_scatter_u32base_offset_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20660,7 +20660,7 @@ pub unsafe fn svstnt1b_scatter_u32base_offset_u32( #[doc = " * This dereferences 
and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20679,7 +20679,7 @@ pub unsafe fn svstnt1h_scatter_u32base_offset_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20715,7 +20715,7 @@ pub unsafe fn svstnt1b_scatter_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20751,7 +20751,7 @@ pub unsafe fn svstnt1h_scatter_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20787,7 +20787,7 @@ pub unsafe fn svstnt1w_scatter_u64base_offset_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20806,7 +20806,7 @@ pub unsafe fn svstnt1b_scatter_u64base_offset_u64( #[doc = " * This dereferences 
and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20825,7 +20825,7 @@ pub unsafe fn svstnt1h_scatter_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20844,7 +20844,7 @@ pub unsafe fn svstnt1w_scatter_u64base_offset_u64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20858,7 +20858,7 @@ pub unsafe fn svstnt1b_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20872,7 +20872,7 @@ pub unsafe fn svstnt1h_scatter_u32base_s32(pg: svbool_t, bases: svuint32_t, data #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20886,7 +20886,7 @@ pub unsafe fn 
svstnt1b_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20900,7 +20900,7 @@ pub unsafe fn svstnt1h_scatter_u32base_u32(pg: svbool_t, bases: svuint32_t, data #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20914,7 +20914,7 @@ pub unsafe fn svstnt1b_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before 
using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20928,7 +20928,7 @@ pub unsafe fn svstnt1h_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20942,7 +20942,7 @@ pub unsafe fn svstnt1w_scatter_u64base_s64(pg: svbool_t, bases: svuint64_t, data #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1b))] @@ -20956,7 +20956,7 @@ pub unsafe fn svstnt1b_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -20970,7 +20970,7 @@ pub unsafe fn svstnt1h_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -20983,7 +20983,7 @@ pub unsafe fn svstnt1w_scatter_u64base_u64(pg: svbool_t, bases: svuint64_t, data #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] 
#[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -21018,7 +21018,7 @@ pub unsafe fn svstnt1h_scatter_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -21053,7 +21053,7 @@ pub unsafe fn svstnt1w_scatter_s64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -21071,7 +21071,7 @@ pub unsafe fn svstnt1h_scatter_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -21089,7 +21089,7 @@ pub unsafe fn svstnt1w_scatter_s64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -21107,7 +21107,7 @@ pub unsafe fn svstnt1h_scatter_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special 
memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -21125,7 +21125,7 @@ pub unsafe fn svstnt1w_scatter_u64index_s64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -21143,7 +21143,7 @@ pub unsafe fn svstnt1h_scatter_u64index_u64( #[doc = " * [`pointer::offset`](pointer#method.offset) safety constraints must be met for the address calculation for each active element (governed by `pg`)."] #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -21162,7 +21162,7 @@ pub unsafe fn svstnt1w_scatter_u64index_u64( #[doc = " * This 
dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -21181,7 +21181,7 @@ pub unsafe fn svstnt1h_scatter_u32base_index_s32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -21200,7 +21200,7 @@ pub unsafe fn svstnt1h_scatter_u32base_index_u32( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some 
applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -21219,7 +21219,7 @@ pub unsafe fn svstnt1h_scatter_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -21238,7 +21238,7 @@ pub unsafe fn svstnt1w_scatter_u64base_index_s64( #[doc = " * This dereferences and accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1h))] @@ -21257,7 +21257,7 @@ pub unsafe fn svstnt1h_scatter_u64base_index_u64( #[doc = " * This dereferences and 
accesses the calculated address for each active element (governed by `pg`)."] #[doc = " * Addresses passed in `bases` lack provenance, so this is similar to using a `usize as ptr` cast (or [`core::ptr::with_exposed_provenance`]) on each lane before using it."] #[doc = " * Non-temporal accesses have special memory ordering rules, and [explicit barriers may be required for some applications](https://developer.arm.com/documentation/den0024/a/Memory-Ordering/Barriers/Non-temporal-load-and-store-pair?lang=en)."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(stnt1w))] @@ -21271,7 +21271,7 @@ pub unsafe fn svstnt1w_scatter_u64base_index_u64( } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21284,7 +21284,7 @@ pub fn svsubhnb_s16(op1: svint16_t, op2: svint16_t) -> svint8_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21293,7 +21293,7 @@ pub fn svsubhnb_n_s16(op1: svint16_t, op2: i16) -> svint8_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21306,7 +21306,7 @@ pub fn svsubhnb_s32(op1: 
svint32_t, op2: svint32_t) -> svint16_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21315,7 +21315,7 @@ pub fn svsubhnb_n_s32(op1: svint32_t, op2: i32) -> svint16_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21328,7 +21328,7 @@ pub fn svsubhnb_s64(op1: svint64_t, op2: svint64_t) -> svint32_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21337,7 +21337,7 @@ pub fn svsubhnb_n_s64(op1: svint64_t, op2: i64) -> svint32_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21346,7 +21346,7 @@ pub fn svsubhnb_u16(op1: svuint16_t, op2: svuint16_t) -> svuint8_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] 
#[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21355,7 +21355,7 @@ pub fn svsubhnb_n_u16(op1: svuint16_t, op2: u16) -> svuint8_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21364,7 +21364,7 @@ pub fn svsubhnb_u32(op1: svuint32_t, op2: svuint32_t) -> svuint16_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21373,7 +21373,7 @@ pub fn svsubhnb_n_u32(op1: svuint32_t, op2: u32) -> svuint16_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21382,7 +21382,7 @@ pub fn svsubhnb_u64(op1: svuint64_t, op2: svuint64_t) -> svuint32_t { } #[doc = "Subtract narrow high part (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnb))] @@ -21391,7 +21391,7 @@ pub fn svsubhnb_n_u64(op1: svuint64_t, op2: u64) -> svuint32_t { } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21404,7 +21404,7 @@ pub fn svsubhnt_s16(even: svint8_t, op1: svint16_t, op2: svint16_t) -> svint8_t } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21413,7 +21413,7 @@ pub fn svsubhnt_n_s16(even: svint8_t, op1: svint16_t, op2: i16) -> svint8_t { } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21426,7 +21426,7 @@ pub fn svsubhnt_s32(even: svint16_t, op1: svint32_t, op2: svint32_t) -> svint16_ } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21435,7 +21435,7 @@ pub fn svsubhnt_n_s32(even: svint16_t, op1: svint32_t, op2: i32) -> svint16_t { } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(subhnt))] @@ -21448,7 +21448,7 @@ pub fn svsubhnt_s64(even: svint32_t, op1: svint64_t, op2: svint64_t) -> svint32_ } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21457,7 +21457,7 @@ pub fn svsubhnt_n_s64(even: svint32_t, op1: svint64_t, op2: i64) -> svint32_t { } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21466,7 +21466,7 @@ pub fn svsubhnt_u16(even: svuint8_t, op1: svuint16_t, op2: svuint16_t) -> svuint } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21475,7 +21475,7 @@ pub fn svsubhnt_n_u16(even: svuint8_t, op1: svuint16_t, op2: u16) -> svuint8_t { } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21484,7 +21484,7 @@ pub fn svsubhnt_u32(even: svuint16_t, op1: svuint32_t, op2: svuint32_t) -> svuin } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21493,7 +21493,7 @@ pub fn svsubhnt_n_u32(even: svuint16_t, op1: svuint32_t, op2: u32) -> svuint16_t } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21502,7 +21502,7 @@ pub fn svsubhnt_u64(even: svuint32_t, op1: svuint64_t, op2: svuint64_t) -> svuin } #[doc = "Subtract narrow high part (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubhnt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(subhnt))] @@ -21511,7 +21511,7 @@ pub fn svsubhnt_n_u64(even: svuint32_t, op1: svuint64_t, op2: u64) -> svuint32_t } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublb))] @@ -21524,7 +21524,7 @@ pub fn svsublb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublb))] 
@@ -21533,7 +21533,7 @@ pub fn svsublb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublb))] @@ -21546,7 +21546,7 @@ pub fn svsublb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublb))] @@ -21555,7 +21555,7 @@ pub fn svsublb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublb))] @@ -21568,7 +21568,7 @@ pub fn svsublb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublb))] @@ -21577,7 +21577,7 @@ pub fn svsublb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublb))] @@ -21590,7 +21590,7 @@ pub fn svsublb_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublb))] @@ -21599,7 +21599,7 @@ pub fn svsublb_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublb))] @@ -21612,7 +21612,7 @@ pub fn svsublb_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublb))] @@ -21621,7 +21621,7 @@ pub fn svsublb_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublb))] @@ -21634,7 +21634,7 @@ pub fn svsublb_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Subtract long (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublb[_n_u64])"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublb))] @@ -21643,7 +21643,7 @@ pub fn svsublb_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Subtract long (bottom - top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublbt))] @@ -21659,7 +21659,7 @@ pub fn svsublbt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Subtract long (bottom - top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublbt))] @@ -21668,7 +21668,7 @@ pub fn svsublbt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Subtract long (bottom - top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublbt))] @@ -21684,7 +21684,7 @@ pub fn svsublbt_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Subtract long (bottom - top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublbt))] @@ -21693,7 +21693,7 @@ pub fn svsublbt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Subtract long (bottom - top)"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublbt))] @@ -21709,7 +21709,7 @@ pub fn svsublbt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Subtract long (bottom - top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublbt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublbt))] @@ -21718,7 +21718,7 @@ pub fn svsublbt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublt))] @@ -21731,7 +21731,7 @@ pub fn svsublt_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublt))] @@ -21740,7 +21740,7 @@ pub fn svsublt_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublt))] @@ -21753,7 +21753,7 @@ pub fn svsublt_s32(op1: svint16_t, op2: 
svint16_t) -> svint32_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublt))] @@ -21762,7 +21762,7 @@ pub fn svsublt_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublt))] @@ -21775,7 +21775,7 @@ pub fn svsublt_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssublt))] @@ -21784,7 +21784,7 @@ pub fn svsublt_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublt))] @@ -21797,7 +21797,7 @@ pub fn svsublt_u16(op1: svuint8_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublt))] @@ 
-21806,7 +21806,7 @@ pub fn svsublt_n_u16(op1: svuint8_t, op2: u8) -> svuint16_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublt))] @@ -21819,7 +21819,7 @@ pub fn svsublt_u32(op1: svuint16_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublt))] @@ -21828,7 +21828,7 @@ pub fn svsublt_n_u32(op1: svuint16_t, op2: u16) -> svuint32_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublt))] @@ -21841,7 +21841,7 @@ pub fn svsublt_u64(op1: svuint32_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Subtract long (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsublt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usublt))] @@ -21850,7 +21850,7 @@ pub fn svsublt_n_u64(op1: svuint32_t, op2: u32) -> svuint64_t { } #[doc = "Subtract long (top - bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubltb))] @@ -21866,7 +21866,7 @@ pub fn svsubltb_s16(op1: svint8_t, op2: svint8_t) -> svint16_t { } #[doc = "Subtract long (top - bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubltb))] @@ -21875,7 +21875,7 @@ pub fn svsubltb_n_s16(op1: svint8_t, op2: i8) -> svint16_t { } #[doc = "Subtract long (top - bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubltb))] @@ -21891,7 +21891,7 @@ pub fn svsubltb_s32(op1: svint16_t, op2: svint16_t) -> svint32_t { } #[doc = "Subtract long (top - bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubltb))] @@ -21900,7 +21900,7 @@ pub fn svsubltb_n_s32(op1: svint16_t, op2: i16) -> svint32_t { } #[doc = "Subtract long (top - bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubltb))] @@ -21916,7 +21916,7 @@ pub fn svsubltb_s64(op1: svint32_t, op2: svint32_t) -> svint64_t { } #[doc = "Subtract long (top - bottom)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubltb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubltb))] @@ -21925,7 +21925,7 @@ pub fn svsubltb_n_s64(op1: svint32_t, op2: i32) -> svint64_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwb))] @@ -21938,7 +21938,7 @@ pub fn svsubwb_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwb))] @@ -21947,7 +21947,7 @@ pub fn svsubwb_n_s16(op1: svint16_t, op2: i8) -> svint16_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwb))] @@ -21960,7 +21960,7 @@ pub fn svsubwb_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwb))] @@ -21969,7 +21969,7 @@ pub fn svsubwb_n_s32(op1: svint32_t, op2: i16) 
-> svint32_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwb))] @@ -21982,7 +21982,7 @@ pub fn svsubwb_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwb))] @@ -21991,7 +21991,7 @@ pub fn svsubwb_n_s64(op1: svint64_t, op2: i32) -> svint64_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwb))] @@ -22004,7 +22004,7 @@ pub fn svsubwb_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwb))] @@ -22013,7 +22013,7 @@ pub fn svsubwb_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwb))] @@ 
-22026,7 +22026,7 @@ pub fn svsubwb_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwb))] @@ -22035,7 +22035,7 @@ pub fn svsubwb_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwb))] @@ -22048,7 +22048,7 @@ pub fn svsubwb_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Subtract wide (bottom)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwb[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwb))] @@ -22057,7 +22057,7 @@ pub fn svsubwb_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwt))] @@ -22070,7 +22070,7 @@ pub fn svsubwt_s16(op1: svint16_t, op2: svint8_t) -> svint16_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwt))] @@ -22079,7 +22079,7 @@ pub fn svsubwt_n_s16(op1: svint16_t, op2: i8) -> svint16_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwt))] @@ -22092,7 +22092,7 @@ pub fn svsubwt_s32(op1: svint32_t, op2: svint16_t) -> svint32_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwt))] @@ -22101,7 +22101,7 @@ pub fn svsubwt_n_s32(op1: svint32_t, op2: i16) -> svint32_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwt))] @@ -22114,7 +22114,7 @@ pub fn svsubwt_s64(op1: svint64_t, op2: svint32_t) -> svint64_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(ssubwt))] @@ -22123,7 +22123,7 @@ pub fn svsubwt_n_s64(op1: svint64_t, op2: i32) -> svint64_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u16])"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwt))] @@ -22136,7 +22136,7 @@ pub fn svsubwt_u16(op1: svuint16_t, op2: svuint8_t) -> svuint16_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwt))] @@ -22145,7 +22145,7 @@ pub fn svsubwt_n_u16(op1: svuint16_t, op2: u8) -> svuint16_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwt))] @@ -22158,7 +22158,7 @@ pub fn svsubwt_u32(op1: svuint32_t, op2: svuint16_t) -> svuint32_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwt))] @@ -22167,7 +22167,7 @@ pub fn svsubwt_n_u32(op1: svuint32_t, op2: u16) -> svuint32_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwt))] @@ -22180,7 +22180,7 @@ pub fn svsubwt_u64(op1: svuint64_t, op2: svuint32_t) -> svuint64_t { } #[doc = "Subtract wide (top)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svsubwt[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(usubwt))] @@ -22189,7 +22189,7 @@ pub fn svsubwt_n_u64(op1: svuint64_t, op2: u32) -> svuint64_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22208,7 +22208,7 @@ pub fn svtbl2_f32(data: svfloat32x2_t, indices: svuint32_t) -> svfloat32_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22227,7 +22227,7 @@ pub fn svtbl2_f64(data: svfloat64x2_t, indices: svuint64_t) -> svfloat64_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22246,7 +22246,7 @@ pub fn svtbl2_s8(data: svint8x2_t, indices: svuint8_t) -> svint8_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22265,7 +22265,7 @@ 
pub fn svtbl2_s16(data: svint16x2_t, indices: svuint16_t) -> svint16_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22284,7 +22284,7 @@ pub fn svtbl2_s32(data: svint32x2_t, indices: svuint32_t) -> svint32_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22303,7 +22303,7 @@ pub fn svtbl2_s64(data: svint64x2_t, indices: svuint64_t) -> svint64_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22312,7 +22312,7 @@ pub fn svtbl2_u8(data: svuint8x2_t, indices: svuint8_t) -> svuint8_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22321,7 +22321,7 @@ pub fn svtbl2_u16(data: svuint16x2_t, indices: svuint16_t) -> svuint16_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22330,7 +22330,7 @@ pub fn svtbl2_u32(data: svuint32x2_t, indices: svuint32_t) -> svuint32_t { } #[doc = "Table lookup in two-vector table"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbl2[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbl))] @@ -22339,7 +22339,7 @@ pub fn svtbl2_u64(data: svuint64x2_t, indices: svuint64_t) -> svuint64_t { } #[doc = "Table lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22352,7 +22352,7 @@ pub fn svtbx_f32(fallback: svfloat32_t, data: svfloat32_t, indices: svuint32_t) } #[doc = "Table lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_f64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22365,7 +22365,7 @@ pub fn svtbx_f64(fallback: svfloat64_t, data: svfloat64_t, indices: svuint64_t) } #[doc = "Table lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22378,7 +22378,7 @@ pub fn svtbx_s8(fallback: svint8_t, data: svint8_t, indices: svuint8_t) -> svint } #[doc = "Table 
lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22391,7 +22391,7 @@ pub fn svtbx_s16(fallback: svint16_t, data: svint16_t, indices: svuint16_t) -> s } #[doc = "Table lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22404,7 +22404,7 @@ pub fn svtbx_s32(fallback: svint32_t, data: svint32_t, indices: svuint32_t) -> s } #[doc = "Table lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22417,7 +22417,7 @@ pub fn svtbx_s64(fallback: svint64_t, data: svint64_t, indices: svuint64_t) -> s } #[doc = "Table lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22426,7 +22426,7 @@ pub fn svtbx_u8(fallback: svuint8_t, data: svuint8_t, indices: svuint8_t) -> svu } #[doc = "Table lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22435,7 +22435,7 @@ pub fn svtbx_u16(fallback: svuint16_t, data: svuint16_t, indices: svuint16_t) -> } #[doc = "Table lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22444,7 +22444,7 @@ pub fn svtbx_u32(fallback: svuint32_t, data: svuint32_t, indices: svuint32_t) -> } #[doc = "Table lookup in single-vector table (merging)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svtbx[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(tbx))] @@ -22453,7 +22453,7 @@ pub fn svtbx_u64(fallback: svuint64_t, data: svuint64_t, indices: svuint64_t) -> } #[doc = "Unpack and extend high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_b])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(punpkhi))] @@ -22469,7 +22469,7 @@ pub fn svunpkhi_b(op: svbool_t) -> svbool_t { } #[doc = "Unpack and extend high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sunpkhi))] @@ -22485,7 +22485,7 @@ pub fn svunpkhi_s16(op: svint8_t) -> svint16_t { } #[doc = "Unpack and extend high half"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sunpkhi))] @@ -22501,7 +22501,7 @@ pub fn svunpkhi_s32(op: svint16_t) -> svint32_t { } #[doc = "Unpack and extend high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sunpkhi))] @@ -22517,7 +22517,7 @@ pub fn svunpkhi_s64(op: svint32_t) -> svint64_t { } #[doc = "Unpack and extend high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uunpkhi))] @@ -22533,7 +22533,7 @@ pub fn svunpkhi_u16(op: svuint8_t) -> svuint16_t { } #[doc = "Unpack and extend high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uunpkhi))] @@ -22549,7 +22549,7 @@ pub fn svunpkhi_u32(op: svuint16_t) -> svuint32_t { } #[doc = "Unpack and extend high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpkhi[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uunpkhi))] @@ -22565,7 +22565,7 @@ pub fn svunpkhi_u64(op: svuint32_t) -> svuint64_t { } #[doc = "Unpack and 
extend low half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_b])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(punpklo))] @@ -22581,7 +22581,7 @@ pub fn svunpklo_b(op: svbool_t) -> svbool_t { } #[doc = "Unpack and extend low half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sunpklo))] @@ -22597,7 +22597,7 @@ pub fn svunpklo_s16(op: svint8_t) -> svint16_t { } #[doc = "Unpack and extend low half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sunpklo))] @@ -22613,7 +22613,7 @@ pub fn svunpklo_s32(op: svint16_t) -> svint32_t { } #[doc = "Unpack and extend low half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(sunpklo))] @@ -22629,7 +22629,7 @@ pub fn svunpklo_s64(op: svint32_t) -> svint64_t { } #[doc = "Unpack and extend low half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uunpklo))] @@ -22645,7 +22645,7 @@ pub fn svunpklo_u16(op: svuint8_t) -> svuint16_t { } #[doc 
= "Unpack and extend low half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uunpklo))] @@ -22661,7 +22661,7 @@ pub fn svunpklo_u32(op: svuint16_t) -> svuint32_t { } #[doc = "Unpack and extend low half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svunpklo[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(uunpklo))] @@ -22677,7 +22677,7 @@ pub fn svunpklo_u64(op: svuint32_t) -> svuint64_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22690,7 +22690,7 @@ pub fn svuqadd_s8_m(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22699,7 +22699,7 @@ pub fn svuqadd_n_s8_m(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(suqadd))] @@ -22708,7 +22708,7 @@ pub fn svuqadd_s8_x(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22717,7 +22717,7 @@ pub fn svuqadd_n_s8_x(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22726,7 +22726,7 @@ pub fn svuqadd_s8_z(pg: svbool_t, op1: svint8_t, op2: svuint8_t) -> svint8_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s8]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22735,7 +22735,7 @@ pub fn svuqadd_n_s8_z(pg: svbool_t, op1: svint8_t, op2: u8) -> svint8_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22748,7 +22748,7 @@ pub fn svuqadd_s16_m(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22757,7 +22757,7 @@ pub fn svuqadd_n_s16_m(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22766,7 +22766,7 @@ pub fn svuqadd_s16_x(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22775,7 +22775,7 @@ pub fn svuqadd_n_s16_x(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22784,7 +22784,7 @@ pub fn svuqadd_s16_z(pg: svbool_t, op1: svint16_t, op2: svuint16_t) -> svint16_t } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s16]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22793,7 +22793,7 @@ pub fn svuqadd_n_s16_z(pg: svbool_t, op1: svint16_t, op2: u16) -> svint16_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22806,7 +22806,7 @@ pub fn svuqadd_s32_m(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22815,7 +22815,7 @@ pub fn svuqadd_n_s32_m(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22824,7 +22824,7 @@ pub fn svuqadd_s32_x(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22833,7 +22833,7 @@ pub fn svuqadd_n_s32_x(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22842,7 +22842,7 @@ pub fn svuqadd_s32_z(pg: svbool_t, op1: svint32_t, op2: svuint32_t) -> svint32_t } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s32]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22851,7 +22851,7 @@ pub fn svuqadd_n_s32_z(pg: svbool_t, op1: svint32_t, op2: u32) -> svint32_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22864,7 +22864,7 @@ pub fn svuqadd_s64_m(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_m)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22873,7 +22873,7 @@ pub fn svuqadd_n_s64_m(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = 
"145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22882,7 +22882,7 @@ pub fn svuqadd_s64_x(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_x)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22891,7 +22891,7 @@ pub fn svuqadd_n_s64_x(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22900,7 +22900,7 @@ pub fn svuqadd_s64_z(pg: svbool_t, op1: svint64_t, op2: svuint64_t) -> svint64_t } #[doc = "Saturating add with unsigned addend"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svuqadd[_n_s64]_z)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(suqadd))] @@ -22909,7 +22909,7 @@ pub fn svuqadd_n_s64_z(pg: svbool_t, op1: svint64_t, op2: u64) -> svint64_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilege))] @@ -22925,7 +22925,7 @@ pub fn svwhilege_b8_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilege))] @@ -22941,7 +22941,7 @@ pub fn svwhilege_b16_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilege))] @@ -22957,7 +22957,7 @@ pub fn svwhilege_b32_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilege))] @@ -22973,7 +22973,7 @@ pub fn svwhilege_b64_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilege))] @@ -22989,7 +22989,7 @@ pub fn svwhilege_b8_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilege))] @@ -23005,7 +23005,7 @@ pub fn svwhilege_b16_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilege))] @@ -23021,7 +23021,7 @@ pub fn svwhilege_b32_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilege))] @@ -23037,7 +23037,7 @@ pub fn svwhilege_b64_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehs))] @@ -23053,7 +23053,7 @@ pub fn svwhilege_b8_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehs))] @@ -23069,7 +23069,7 @@ pub fn svwhilege_b16_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While decrementing scalar is greater than or 
equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehs))] @@ -23085,7 +23085,7 @@ pub fn svwhilege_b32_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehs))] @@ -23101,7 +23101,7 @@ pub fn svwhilege_b64_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b8[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehs))] @@ -23117,7 +23117,7 @@ pub fn svwhilege_b8_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b16[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehs))] @@ -23133,7 +23133,7 @@ pub fn svwhilege_b16_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b32[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature 
= "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehs))] @@ -23149,7 +23149,7 @@ pub fn svwhilege_b32_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While decrementing scalar is greater than or equal to"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilege_b64[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehs))] @@ -23165,7 +23165,7 @@ pub fn svwhilege_b64_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilegt))] @@ -23181,7 +23181,7 @@ pub fn svwhilegt_b8_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilegt))] @@ -23197,7 +23197,7 @@ pub fn svwhilegt_b16_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilegt))] @@ -23213,7 +23213,7 @@ pub fn svwhilegt_b32_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilegt))] @@ -23229,7 +23229,7 @@ pub fn svwhilegt_b64_s32(op1: i32, op2: i32) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilegt))] @@ -23245,7 +23245,7 @@ pub fn svwhilegt_b8_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilegt))] @@ -23261,7 +23261,7 @@ pub fn svwhilegt_b16_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilegt))] @@ -23277,7 +23277,7 @@ pub fn svwhilegt_b32_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, 
assert_instr(whilegt))] @@ -23293,7 +23293,7 @@ pub fn svwhilegt_b64_s64(op1: i64, op2: i64) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehi))] @@ -23309,7 +23309,7 @@ pub fn svwhilegt_b8_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehi))] @@ -23325,7 +23325,7 @@ pub fn svwhilegt_b16_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehi))] @@ -23341,7 +23341,7 @@ pub fn svwhilegt_b32_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehi))] @@ -23357,7 +23357,7 @@ pub fn svwhilegt_b64_u32(op1: u32, op2: u32) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b8[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehi))] @@ -23373,7 +23373,7 @@ pub fn svwhilegt_b8_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b16[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehi))] @@ -23389,7 +23389,7 @@ pub fn svwhilegt_b16_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b32[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehi))] @@ -23405,7 +23405,7 @@ pub fn svwhilegt_b32_u64(op1: u64, op2: u64) -> svbool_t { } #[doc = "While decrementing scalar is greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilegt_b64[_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilehi))] @@ -23419,7 +23419,7 @@ pub fn svwhilegt_b64_u64(op1: u64, op2: u64) -> svbool_t { } unsafe { _svwhilegt_b64_u64(op1.as_signed(), op2.as_signed()).sve_into() } } -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] unsafe fn svwhilerw_8ptr(op1: *const T, op2: *const T) -> svbool_t { @@ -23437,7 +23437,7 @@ unsafe fn svwhilerw_8ptr(op1: *const 
T, op2: *const T) -> svbool_t { } _svwhilerw_8ptr(op1, op2) } -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] unsafe fn svwhilerw_16ptr(op1: *const T, op2: *const T) -> svbool_t { @@ -23455,7 +23455,7 @@ unsafe fn svwhilerw_16ptr(op1: *const T, op2: *const T) -> svbool_t { } _svwhilerw_16ptr(op1, op2).sve_into() } -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] unsafe fn svwhilerw_32ptr(op1: *const T, op2: *const T) -> svbool_t { @@ -23473,7 +23473,7 @@ unsafe fn svwhilerw_32ptr(op1: *const T, op2: *const T) -> svbool_t { } _svwhilerw_32ptr(op1, op2).sve_into() } -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] unsafe fn svwhilerw_64ptr(op1: *const T, op2: *const T) -> svbool_t { @@ -23495,7 +23495,7 @@ unsafe fn svwhilerw_64ptr(op1: *const T, op2: *const T) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f32])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] @@ -23506,7 +23506,7 @@ pub unsafe fn svwhilerw_f32(op1: *const f32, op2: *const f32) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_f64])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = 
"stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] @@ -23517,7 +23517,7 @@ pub unsafe fn svwhilerw_f64(op1: *const f64, op2: *const f64) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s8])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] @@ -23528,7 +23528,7 @@ pub unsafe fn svwhilerw_s8(op1: *const i8, op2: *const i8) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s16])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] @@ -23539,7 +23539,7 @@ pub unsafe fn svwhilerw_s16(op1: *const i16, op2: *const i16) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s32])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] @@ -23550,7 +23550,7 @@ pub unsafe fn svwhilerw_s32(op1: *const i32, op2: *const i32) -> svbool_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_s64])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] @@ -23561,7 +23561,7 @@ pub unsafe fn svwhilerw_s64(op1: *const i64, op2: *const i64) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u8])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] @@ -23572,7 +23572,7 @@ pub unsafe fn svwhilerw_u8(op1: *const u8, op2: *const u8) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u16])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] @@ -23583,7 +23583,7 @@ pub unsafe fn svwhilerw_u16(op1: *const u16, op2: *const u16) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u32])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] 
-#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] @@ -23594,14 +23594,14 @@ pub unsafe fn svwhilerw_u32(op1: *const u32, op2: *const u32) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilerw[_u64])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilerw))] pub unsafe fn svwhilerw_u64(op1: *const u64, op2: *const u64) -> svbool_t { svwhilerw_64ptr::(op1, op2) } -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] unsafe fn svwhilewr_8ptr(op1: *const T, op2: *const T) -> svbool_t { @@ -23619,7 +23619,7 @@ unsafe fn svwhilewr_8ptr(op1: *const T, op2: *const T) -> svbool_t { } _svwhilewr_8ptr(op1, op2) } -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] unsafe fn svwhilewr_16ptr(op1: *const T, op2: *const T) -> svbool_t { @@ -23637,7 +23637,7 @@ unsafe fn svwhilewr_16ptr(op1: *const T, op2: *const T) -> svbool_t { } _svwhilewr_16ptr(op1, op2).sve_into() } -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] unsafe fn svwhilewr_32ptr(op1: *const T, op2: *const T) -> svbool_t { @@ -23655,7 +23655,7 @@ unsafe fn svwhilewr_32ptr(op1: *const T, op2: *const T) -> svbool_t { } _svwhilewr_32ptr(op1, op2).sve_into() } -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] unsafe fn 
svwhilewr_64ptr(op1: *const T, op2: *const T) -> svbool_t { @@ -23677,7 +23677,7 @@ unsafe fn svwhilewr_64ptr(op1: *const T, op2: *const T) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f32])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilewr))] @@ -23688,7 +23688,7 @@ pub unsafe fn svwhilewr_f32(op1: *const f32, op2: *const f32) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_f64])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilewr))] @@ -23699,7 +23699,7 @@ pub unsafe fn svwhilewr_f64(op1: *const f64, op2: *const f64) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s8])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilewr))] @@ -23710,7 +23710,7 @@ pub unsafe fn svwhilewr_s8(op1: *const i8, op2: *const i8) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s16])"] #[doc = "## Safety"] 
#[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilewr))] @@ -23721,7 +23721,7 @@ pub unsafe fn svwhilewr_s16(op1: *const i16, op2: *const i16) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s32])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilewr))] @@ -23732,7 +23732,7 @@ pub unsafe fn svwhilewr_s32(op1: *const i32, op2: *const i32) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_s64])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilewr))] @@ -23743,7 +23743,7 @@ pub unsafe fn svwhilewr_s64(op1: *const i64, op2: *const i64) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u8])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] 
#[cfg_attr(test, assert_instr(whilewr))] @@ -23754,7 +23754,7 @@ pub unsafe fn svwhilewr_u8(op1: *const u8, op2: *const u8) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u16])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilewr))] @@ -23765,7 +23765,7 @@ pub unsafe fn svwhilewr_u16(op1: *const u16, op2: *const u16) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u32])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilewr))] @@ -23776,7 +23776,7 @@ pub unsafe fn svwhilewr_u32(op1: *const u32, op2: *const u32) -> svbool_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svwhilewr[_u64])"] #[doc = "## Safety"] #[doc = " * [`pointer::byte_offset_from`](pointer#method.byte_offset_from) safety constraints must be met for at least the base pointers, `op1` and `op2`."] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(whilewr))] @@ -23785,7 +23785,7 @@ pub unsafe fn svwhilewr_u64(op1: *const u64, op2: *const u64) -> svbool_t { } #[doc = "Bitwise exclusive OR and rotate right"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(xar, IMM3 = 1))] @@ -23799,7 +23799,7 @@ pub fn svxar_n_s8(op1: svint8_t, op2: svint8_t) -> svint8_t { } #[doc = "Bitwise exclusive OR and rotate right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(xar, IMM3 = 1))] @@ -23813,7 +23813,7 @@ pub fn svxar_n_s16(op1: svint16_t, op2: svint16_t) -> svint16_t } #[doc = "Bitwise exclusive OR and rotate right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(xar, IMM3 = 1))] @@ -23827,7 +23827,7 @@ pub fn svxar_n_s32(op1: svint32_t, op2: svint32_t) -> svint32_t } #[doc = "Bitwise exclusive OR and rotate right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_s64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(xar, IMM3 = 1))] @@ -23841,7 +23841,7 @@ pub fn svxar_n_s64(op1: svint64_t, op2: svint64_t) -> svint64_t } #[doc = "Bitwise exclusive OR and rotate right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u8])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(xar, 
IMM3 = 1))] @@ -23851,7 +23851,7 @@ pub fn svxar_n_u8(op1: svuint8_t, op2: svuint8_t) -> svuint8_t } #[doc = "Bitwise exclusive OR and rotate right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u16])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(xar, IMM3 = 1))] @@ -23861,7 +23861,7 @@ pub fn svxar_n_u16(op1: svuint16_t, op2: svuint16_t) -> svuint1 } #[doc = "Bitwise exclusive OR and rotate right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u32])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(xar, IMM3 = 1))] @@ -23871,7 +23871,7 @@ pub fn svxar_n_u32(op1: svuint32_t, op2: svuint32_t) -> svuint3 } #[doc = "Bitwise exclusive OR and rotate right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/svxar[_n_u64])"] -#[inline(always)] +#[inline] #[target_feature(enable = "sve,sve2")] #[unstable(feature = "stdarch_aarch64_sve", issue = "145052")] #[cfg_attr(test, assert_instr(xar, IMM3 = 1))] diff --git a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs index 28a978fdbbc3e..84c1a91adf79f 100644 --- a/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs +++ b/library/stdarch/crates/core_arch/src/arm_shared/neon/generated.rs @@ -14,7 +14,7 @@ use super::*; #[doc = "CRC32 single round checksum for bytes (8 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32b)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, 
assert_instr(crc32b))] @@ -39,7 +39,7 @@ pub fn __crc32b(crc: u32, data: u8) -> u32 { } #[doc = "CRC32-C single round checksum for bytes (8 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cb)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32cb))] @@ -64,7 +64,7 @@ pub fn __crc32cb(crc: u32, data: u8) -> u32 { } #[doc = "CRC32-C single round checksum for quad words (64 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cd)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(crc32cw))] @@ -83,7 +83,7 @@ pub fn __crc32cd(crc: u32, data: u64) -> u32 { } #[doc = "CRC32-C single round checksum for bytes (16 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32ch)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32ch))] @@ -108,7 +108,7 @@ pub fn __crc32ch(crc: u32, data: u16) -> u32 { } #[doc = "CRC32-C single round checksum for bytes (32 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32cw)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32cw))] @@ -133,7 +133,7 @@ pub fn __crc32cw(crc: u32, data: u32) -> u32 { } #[doc = "CRC32 single round checksum for quad words (64 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32d)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg(target_arch = "arm")] #[cfg_attr(test, 
assert_instr(crc32w))] @@ -152,7 +152,7 @@ pub fn __crc32d(crc: u32, data: u64) -> u32 { } #[doc = "CRC32 single round checksum for bytes (16 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32h)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32h))] @@ -177,7 +177,7 @@ pub fn __crc32h(crc: u32, data: u16) -> u32 { } #[doc = "CRC32 single round checksum for bytes (32 bits)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/__crc32w)"] -#[inline(always)] +#[inline] #[target_feature(enable = "crc")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(crc32w))] @@ -202,7 +202,7 @@ pub fn __crc32w(crc: u32, data: u32) -> u32 { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -220,7 +220,7 @@ fn priv_vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -238,7 +238,7 @@ fn priv_vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -256,7 +256,7 @@ fn priv_vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -274,7 +274,7 @@ fn priv_vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -292,7 +292,7 @@ fn priv_vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -310,7 +310,7 @@ fn priv_vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -328,7 +328,7 @@ fn priv_vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u8)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -346,7 +346,7 @@ fn priv_vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -364,7 +364,7 @@ fn priv_vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -382,7 +382,7 @@ fn priv_vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadal_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -400,7 +400,7 @@ fn priv_vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/priv_vpadalq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -418,7 +418,7 @@ fn priv_vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { } #[doc = "Absolute difference and accumulate (64-bit)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))] @@ -439,7 +439,7 @@ pub fn vaba_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { } #[doc = "Absolute difference and accumulate (64-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))] @@ -460,7 +460,7 @@ pub fn vaba_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { } #[doc = "Absolute difference and accumulate (64-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))] @@ -481,7 +481,7 @@ pub fn vaba_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { } #[doc = "Absolute difference and accumulate (64-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))] @@ -502,7 +502,7 @@ pub fn vaba_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { } #[doc = "Absolute difference and accumulate (64-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))] @@ -523,7 +523,7 @@ pub fn vaba_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { } #[doc = "Absolute difference and accumulate (64-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaba_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))] @@ -544,7 +544,7 @@ pub fn vaba_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s8"))] @@ -569,7 +569,7 @@ pub fn vabal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { } #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s16"))] @@ -594,7 +594,7 @@ pub fn vabal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { } #[doc = "Signed Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.s32"))] @@ -619,7 
+619,7 @@ pub fn vabal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { } #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u8"))] @@ -641,7 +641,7 @@ pub fn vabal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { } #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u16"))] @@ -663,7 +663,7 @@ pub fn vabal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { } #[doc = "Unsigned Absolute difference and Accumulate Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabal_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabal.u32"))] @@ -685,7 +685,7 @@ pub fn vabal_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { } #[doc = "Absolute difference and accumulate (128-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s16"))] @@ -706,7 +706,7 @@ pub fn vabaq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { } #[doc = "Absolute difference and 
accumulate (128-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s32"))] @@ -727,7 +727,7 @@ pub fn vabaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { } #[doc = "Absolute difference and accumulate (128-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.s8"))] @@ -748,7 +748,7 @@ pub fn vabaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { } #[doc = "Absolute difference and accumulate (128-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u16"))] @@ -769,7 +769,7 @@ pub fn vabaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { } #[doc = "Absolute difference and accumulate (128-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u32"))] @@ -790,7 +790,7 @@ pub fn vabaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { } #[doc = "Absolute difference and accumulate (128-bit)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabaq_u8)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vaba.u8"))] @@ -811,7 +811,7 @@ pub fn vabaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f16"))] #[cfg_attr( @@ -841,7 +841,7 @@ pub fn vabd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f16"))] #[cfg_attr( @@ -871,7 +871,7 @@ pub fn vabdq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.f32"))] @@ -900,7 +900,7 @@ pub fn vabd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Absolute difference between the arguments of Floating"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vabd.f32"))] @@ -929,7 +929,7 @@ pub fn vabdq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] @@ -958,7 +958,7 @@ pub fn vabd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s8"))] @@ -987,7 +987,7 @@ pub fn vabdq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] @@ -1016,7 +1016,7 @@ pub fn vabd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s16"))] @@ -1045,7 +1045,7 @@ pub fn vabdq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] @@ -1074,7 +1074,7 @@ pub fn vabd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.s32"))] @@ -1103,7 +1103,7 @@ pub fn vabdq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] @@ -1132,7 +1132,7 @@ pub fn vabd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u8"))] @@ -1161,7 +1161,7 @@ pub fn vabdq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable 
= "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] @@ -1190,7 +1190,7 @@ pub fn vabd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u16"))] @@ -1219,7 +1219,7 @@ pub fn vabdq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabd_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] @@ -1248,7 +1248,7 @@ pub fn vabd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Absolute difference between the arguments"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabd.u32"))] @@ -1277,7 +1277,7 @@ pub fn vabdq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s8"))] @@ -1301,7 +1301,7 @@ pub fn vabdl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { } #[doc = "Signed Absolute difference Long"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s16"))] @@ -1325,7 +1325,7 @@ pub fn vabdl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { } #[doc = "Signed Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.s32"))] @@ -1349,7 +1349,7 @@ pub fn vabdl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { } #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u8"))] @@ -1370,7 +1370,7 @@ pub fn vabdl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { } #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u16"))] @@ -1391,7 +1391,7 @@ pub fn vabdl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { } #[doc = "Unsigned Absolute difference Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabdl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vabdl.u32"))] @@ -1412,7 +1412,7 @@ pub fn vabdl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { } #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( @@ -1434,7 +1434,7 @@ pub fn vabs_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( @@ -1456,7 +1456,7 @@ pub fn vabsq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] @@ -1477,7 +1477,7 @@ pub fn vabs_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] @@ -1498,7 +1498,7 @@ pub fn vabsq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] @@ -1523,7 +1523,7 @@ pub fn vabs_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] @@ -1548,7 +1548,7 @@ pub fn vabsq_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] @@ -1573,7 +1573,7 @@ pub fn vabs_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] @@ -1598,7 +1598,7 @@ pub fn vabsq_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabs_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] @@ -1623,7 +1623,7 @@ pub fn vabs_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Absolute value (wrapping)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsq_s32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] @@ -1648,7 +1648,7 @@ pub fn vabsq_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Floating-point absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vabsh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vabs))] #[cfg_attr( @@ -1663,7 +1663,7 @@ pub fn vabsh_f16(a: f16) -> f16 { } #[doc = "Floating-point Add (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vadd.f16"))] #[cfg_attr( @@ -1685,7 +1685,7 @@ pub fn vadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point Add (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vadd.f16"))] #[cfg_attr( @@ -1707,7 +1707,7 @@ pub fn vaddq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1728,7 +1728,7 @@ pub fn vadd_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Vector add."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1749,7 +1749,7 @@ pub fn vadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1770,7 +1770,7 @@ pub fn vadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1791,7 +1791,7 @@ pub fn vadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1812,7 +1812,7 @@ pub fn vadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1833,7 +1833,7 @@ pub fn vadd_u32(a: uint32x2_t, b: uint32x2_t) -> 
uint32x2_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1854,7 +1854,7 @@ pub fn vadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1875,7 +1875,7 @@ pub fn vaddq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1896,7 +1896,7 @@ pub fn vaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1917,7 +1917,7 @@ pub fn vaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ 
-1938,7 +1938,7 @@ pub fn vaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1959,7 +1959,7 @@ pub fn vaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -1980,7 +1980,7 @@ pub fn vaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -2001,7 +2001,7 @@ pub fn vaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -2022,7 +2022,7 @@ pub fn vaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Vector add."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vadd))] @@ -2043,7 +2043,7 @@ pub fn vaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -2064,7 +2064,7 @@ pub fn vadd_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -2085,7 +2085,7 @@ pub fn vaddq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -2106,7 +2106,7 @@ pub fn vadd_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -2127,7 +2127,7 @@ pub fn vaddq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vadd_p64)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -2148,7 +2148,7 @@ pub fn vadd_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x1_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -2169,7 +2169,7 @@ pub fn vaddq_p64(a: poly64x2_t, b: poly64x2_t) -> poly64x2_t { } #[doc = "Add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddh_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vadd.f16"))] #[cfg_attr( @@ -2184,7 +2184,7 @@ pub fn vaddh_f16(a: f16, b: f16) -> f16 { } #[doc = "Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2208,7 +2208,7 @@ pub fn vaddhn_high_s16(r: int8x8_t, a: int16x8_t, b: int16x8_t) -> int8x16_t { } #[doc = "Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2232,7 +2232,7 @@ pub fn vaddhn_high_s32(r: int16x4_t, a: int32x4_t, b: int32x4_t) -> int16x8_t { } #[doc = "Add 
returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2256,7 +2256,7 @@ pub fn vaddhn_high_s64(r: int32x2_t, a: int64x2_t, b: int64x2_t) -> int32x4_t { } #[doc = "Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2280,7 +2280,7 @@ pub fn vaddhn_high_u16(r: uint8x8_t, a: uint16x8_t, b: uint16x8_t) -> uint8x16_t } #[doc = "Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2304,7 +2304,7 @@ pub fn vaddhn_high_u32(r: uint16x4_t, a: uint32x4_t, b: uint32x4_t) -> uint16x8_ } #[doc = "Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_high_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2328,7 +2328,7 @@ pub fn vaddhn_high_u64(r: uint32x2_t, a: uint64x2_t, b: uint64x2_t) -> uint32x4_ } #[doc = "Add returning High Narrow."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2349,7 +2349,7 @@ pub fn vaddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { } #[doc = "Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2370,7 +2370,7 @@ pub fn vaddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { } #[doc = "Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2391,7 +2391,7 @@ pub fn vaddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { } #[doc = "Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2412,7 +2412,7 @@ pub fn vaddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { } #[doc = "Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vaddhn))] @@ -2433,7 +2433,7 @@ pub fn vaddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { } #[doc = "Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddhn_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddhn))] @@ -2454,7 +2454,7 @@ pub fn vaddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { } #[doc = "Signed Add Long (vector, high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2481,7 +2481,7 @@ pub fn vaddl_high_s16(a: int16x8_t, b: int16x8_t) -> int32x4_t { } #[doc = "Signed Add Long (vector, high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2508,7 +2508,7 @@ pub fn vaddl_high_s32(a: int32x4_t, b: int32x4_t) -> int64x2_t { } #[doc = "Signed Add Long (vector, high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2535,7 +2535,7 @@ pub fn vaddl_high_s8(a: int8x16_t, b: int8x16_t) -> int16x8_t { } #[doc = "Signed Add Long (vector, high half)."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2562,7 +2562,7 @@ pub fn vaddl_high_u16(a: uint16x8_t, b: uint16x8_t) -> uint32x4_t { } #[doc = "Signed Add Long (vector, high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2589,7 +2589,7 @@ pub fn vaddl_high_u32(a: uint32x4_t, b: uint32x4_t) -> uint64x2_t { } #[doc = "Signed Add Long (vector, high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2616,7 +2616,7 @@ pub fn vaddl_high_u8(a: uint8x16_t, b: uint8x16_t) -> uint16x8_t { } #[doc = "Add Long (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2641,7 +2641,7 @@ pub fn vaddl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { } #[doc = "Add Long (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vaddl))] @@ -2666,7 +2666,7 @@ pub fn vaddl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { } #[doc = "Add Long (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2691,7 +2691,7 @@ pub fn vaddl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { } #[doc = "Add Long (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2716,7 +2716,7 @@ pub fn vaddl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { } #[doc = "Add Long (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2741,7 +2741,7 @@ pub fn vaddl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { } #[doc = "Add Long (vector)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddl))] @@ -2766,7 +2766,7 @@ pub fn vaddl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { } #[doc = "Bitwise exclusive OR"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddq_p128)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -2787,7 +2787,7 @@ pub fn vaddq_p128(a: p128, b: p128) -> p128 { } #[doc = "Add Wide (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -2812,7 +2812,7 @@ pub fn vaddw_high_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { } #[doc = "Add Wide (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -2837,7 +2837,7 @@ pub fn vaddw_high_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { } #[doc = "Add Wide (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -2862,7 +2862,7 @@ pub fn vaddw_high_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { } #[doc = "Add Wide (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -2887,7 +2887,7 @@ pub fn vaddw_high_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { } #[doc = "Add Wide (high half)."] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -2912,7 +2912,7 @@ pub fn vaddw_high_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { } #[doc = "Add Wide (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -2937,7 +2937,7 @@ pub fn vaddw_high_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { } #[doc = "Add Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -2961,7 +2961,7 @@ pub fn vaddw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { } #[doc = "Add Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -2985,7 +2985,7 @@ pub fn vaddw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { } #[doc = "Add Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -3009,7 
+3009,7 @@ pub fn vaddw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { } #[doc = "Add Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -3033,7 +3033,7 @@ pub fn vaddw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { } #[doc = "Add Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -3057,7 +3057,7 @@ pub fn vaddw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { } #[doc = "Add Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaddw_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vaddw))] @@ -3081,7 +3081,7 @@ pub fn vaddw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { } #[doc = "AES single round encryption."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesdq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aesd))] @@ -3106,7 +3106,7 @@ pub fn vaesdq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { } #[doc = "AES single round encryption."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaeseq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] 
#[cfg_attr(test, assert_instr(aese))] @@ -3131,7 +3131,7 @@ pub fn vaeseq_u8(data: uint8x16_t, key: uint8x16_t) -> uint8x16_t { } #[doc = "AES inverse mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesimcq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aesimc))] @@ -3156,7 +3156,7 @@ pub fn vaesimcq_u8(data: uint8x16_t) -> uint8x16_t { } #[doc = "AES mix columns."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vaesmcq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(aesmc))] @@ -3181,7 +3181,7 @@ pub fn vaesmcq_u8(data: uint8x16_t) -> uint8x16_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3202,7 +3202,7 @@ pub fn vand_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3223,7 +3223,7 @@ pub fn vandq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3244,7 +3244,7 @@ pub fn vand_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3265,7 +3265,7 @@ pub fn vandq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3286,7 +3286,7 @@ pub fn vand_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3307,7 +3307,7 @@ pub fn vandq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3328,7 +3328,7 @@ pub fn vand_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_s64)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3349,7 +3349,7 @@ pub fn vandq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3370,7 +3370,7 @@ pub fn vand_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3391,7 +3391,7 @@ pub fn vandq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3412,7 +3412,7 @@ pub fn vand_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3433,7 +3433,7 @@ pub fn vandq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3454,7 +3454,7 @@ pub fn vand_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3475,7 +3475,7 @@ pub fn vandq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vand_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3496,7 +3496,7 @@ pub fn vand_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Vector bitwise and"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vandq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vand))] @@ -3517,7 +3517,7 @@ pub fn vandq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3539,7 +3539,7 
@@ pub fn vbic_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3561,7 +3561,7 @@ pub fn vbic_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3583,7 +3583,7 @@ pub fn vbic_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3605,7 +3605,7 @@ pub fn vbic_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3627,7 +3627,7 @@ pub fn vbicq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3649,7 +3649,7 @@ pub fn vbicq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3671,7 +3671,7 @@ pub fn vbicq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3693,7 +3693,7 @@ pub fn vbicq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3715,7 +3715,7 @@ pub fn vbic_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3737,7 +3737,7 @@ pub fn vbic_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3759,7 +3759,7 @@ pub fn vbic_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbic_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3781,7 +3781,7 @@ pub fn vbic_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3803,7 +3803,7 @@ pub fn vbicq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3825,7 +3825,7 @@ pub fn vbicq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ 
-3847,7 +3847,7 @@ pub fn vbicq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Vector bitwise bit clear."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbicq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbic))] @@ -3869,7 +3869,7 @@ pub fn vbicq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -3897,7 +3897,7 @@ pub fn vbsl_f16(a: uint16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,fp16")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -3925,7 +3925,7 @@ pub fn vbslq_f16(a: uint16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -3952,7 +3952,7 @@ pub fn vbsl_f32(a: uint32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_p16)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -3979,7 +3979,7 @@ pub fn vbsl_p16(a: uint16x4_t, b: poly16x4_t, c: poly16x4_t) -> poly16x4_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4006,7 +4006,7 @@ pub fn vbsl_p8(a: uint8x8_t, b: poly8x8_t, c: poly8x8_t) -> poly8x8_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4033,7 +4033,7 @@ pub fn vbsl_s16(a: uint16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4060,7 +4060,7 @@ pub fn vbsl_s32(a: uint32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4087,7 +4087,7 @@ pub fn vbsl_s64(a: uint64x1_t, b: int64x1_t, c: int64x1_t) -> int64x1_t { } #[doc = "Bitwise 
Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4114,7 +4114,7 @@ pub fn vbsl_s8(a: uint8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4141,7 +4141,7 @@ pub fn vbslq_f32(a: uint32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4168,7 +4168,7 @@ pub fn vbslq_p16(a: uint16x8_t, b: poly16x8_t, c: poly16x8_t) -> poly16x8_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4195,7 +4195,7 @@ pub fn vbslq_p8(a: uint8x16_t, b: poly8x16_t, c: poly8x16_t) -> poly8x16_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vbsl))] @@ -4222,7 +4222,7 @@ pub fn vbslq_s16(a: uint16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4249,7 +4249,7 @@ pub fn vbslq_s32(a: uint32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4276,7 +4276,7 @@ pub fn vbslq_s64(a: uint64x2_t, b: int64x2_t, c: int64x2_t) -> int64x2_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4303,7 +4303,7 @@ pub fn vbslq_s8(a: uint8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4330,7 +4330,7 @@ pub fn vbsl_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4357,7 +4357,7 @@ pub fn vbsl_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4384,7 +4384,7 @@ pub fn vbsl_u64(a: uint64x1_t, b: uint64x1_t, c: uint64x1_t) -> uint64x1_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbsl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4411,7 +4411,7 @@ pub fn vbsl_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4438,7 +4438,7 @@ pub fn vbslq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vbsl))] @@ -4465,7 +4465,7 @@ pub fn vbslq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4492,7 +4492,7 @@ pub fn vbslq_u64(a: uint64x2_t, b: uint64x2_t, c: uint64x2_t) -> uint64x2_t { } #[doc = "Bitwise Select."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vbslq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vbsl))] @@ -4519,7 +4519,7 @@ pub fn vbslq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] #[cfg_attr( @@ -4549,7 +4549,7 @@ pub fn vcage_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] #[cfg_attr( @@ -4579,7 +4579,7 @@ pub fn vcageq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcage_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] @@ -4608,7 +4608,7 @@ pub fn vcage_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point absolute compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcageq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] @@ -4637,7 +4637,7 @@ pub fn vcageq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] #[cfg_attr( @@ -4667,7 +4667,7 @@ pub fn vcagt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] #[cfg_attr( @@ -4697,7 +4697,7 @@ pub fn vcagtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagt_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] @@ -4726,7 +4726,7 @@ pub fn vcagt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point absolute compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcagtq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] @@ -4755,7 +4755,7 @@ pub fn vcagtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] #[cfg_attr( @@ -4777,7 +4777,7 @@ pub fn vcale_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f16"))] #[cfg_attr( @@ -4799,7 +4799,7 @@ pub fn vcaleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcale_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] @@ -4820,7 +4820,7 @@ pub fn vcale_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } 
#[doc = "Floating-point absolute compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaleq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacge.f32"))] @@ -4841,7 +4841,7 @@ pub fn vcaleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] #[cfg_attr( @@ -4863,7 +4863,7 @@ pub fn vcalt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f16"))] #[cfg_attr( @@ -4885,7 +4885,7 @@ pub fn vcaltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcalt_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] @@ -4906,7 +4906,7 @@ pub fn vcalt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point absolute compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcaltq_f32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vacgt.f32"))] @@ -4927,7 +4927,7 @@ pub fn vcaltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f16"))] #[cfg_attr( @@ -4949,7 +4949,7 @@ pub fn vceq_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f16"))] #[cfg_attr( @@ -4971,7 +4971,7 @@ pub fn vceqq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] @@ -4992,7 +4992,7 @@ pub fn vceq_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.f32"))] @@ -5013,7 +5013,7 @@ pub fn vceqq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } #[doc = "Compare bitwise 
Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] @@ -5034,7 +5034,7 @@ pub fn vceq_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] @@ -5055,7 +5055,7 @@ pub fn vceqq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] @@ -5076,7 +5076,7 @@ pub fn vceq_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] @@ -5097,7 +5097,7 @@ pub fn vceqq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] @@ -5118,7 +5118,7 @@ pub fn vceq_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] @@ -5139,7 +5139,7 @@ pub fn vceqq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] @@ -5160,7 +5160,7 @@ pub fn vceq_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] @@ -5181,7 +5181,7 @@ pub fn vceqq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] @@ -5202,7 +5202,7 @@ pub fn vceq_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i16"))] @@ -5223,7 +5223,7 @@ pub fn vceqq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] @@ -5244,7 +5244,7 @@ pub fn vceq_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i32"))] @@ -5265,7 +5265,7 @@ pub fn vceqq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vceq.i8"))] @@ -5286,7 +5286,7 @@ pub fn vceq_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { } #[doc = "Compare bitwise Equal (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vceqq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vceq.i8"))] @@ -5307,7 +5307,7 @@ pub fn vceqq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { } #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( @@ -5329,7 +5329,7 @@ pub fn vcge_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( @@ -5351,7 +5351,7 @@ pub fn vcgeq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] @@ -5372,7 +5372,7 @@ pub fn vcge_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] @@ -5393,7 +5393,7 @@ pub fn vcgeq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] @@ -5414,7 +5414,7 @@ pub fn vcge_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] @@ -5435,7 +5435,7 @@ pub fn vcgeq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] @@ -5456,7 +5456,7 @@ pub fn vcge_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] @@ -5477,7 +5477,7 @@ pub fn vcgeq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] @@ -5498,7 +5498,7 @@ pub fn vcge_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Compare signed greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] @@ -5519,7 +5519,7 @@ pub fn vcgeq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] @@ -5540,7 +5540,7 @@ pub fn vcge_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] @@ -5561,7 +5561,7 @@ pub fn vcgeq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] @@ -5582,7 +5582,7 @@ pub fn vcge_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] @@ -5603,7 +5603,7 @@ pub fn vcgeq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcge_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] @@ -5624,7 +5624,7 @@ pub fn vcge_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Compare unsigned greater than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgeq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] @@ -5645,7 +5645,7 @@ pub fn vcgeq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgez_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( @@ -5668,7 +5668,7 @@ pub fn vcgez_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare greater than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgezq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( @@ -5691,7 +5691,7 @@ pub fn vcgezq_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( @@ -5713,7 +5713,7 @@ pub fn vcgt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( @@ -5735,7 +5735,7 @@ pub fn vcgtq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] @@ -5756,7 +5756,7 @@ pub fn vcgt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] @@ -5777,7 +5777,7 @@ pub fn vcgtq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } #[doc = "Compare signed greater than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] @@ -5798,7 +5798,7 @@ pub fn vcgt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] @@ -5819,7 +5819,7 @@ pub fn vcgtq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] @@ -5840,7 +5840,7 @@ pub fn vcgt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] @@ -5861,7 +5861,7 @@ pub fn vcgtq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vcgt.s32"))] @@ -5882,7 +5882,7 @@ pub fn vcgt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Compare signed greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] @@ -5903,7 +5903,7 @@ pub fn vcgtq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] @@ -5924,7 +5924,7 @@ pub fn vcgt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] @@ -5945,7 +5945,7 @@ pub fn vcgtq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] @@ -5966,7 +5966,7 @@ pub fn vcgt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Compare unsigned greater than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] @@ -5987,7 +5987,7 @@ pub fn vcgtq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgt_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] @@ -6008,7 +6008,7 @@ pub fn vcgt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Compare unsigned greater than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] @@ -6029,7 +6029,7 @@ pub fn vcgtq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtz_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( @@ -6052,7 +6052,7 @@ pub fn vcgtz_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare greater than zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcgtzq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] 
#[cfg_attr( @@ -6075,7 +6075,7 @@ pub fn vcgtzq_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( @@ -6097,7 +6097,7 @@ pub fn vcle_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f16"))] #[cfg_attr( @@ -6119,7 +6119,7 @@ pub fn vcleq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] @@ -6140,7 +6140,7 @@ pub fn vcle_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.f32"))] @@ -6161,7 +6161,7 @@ pub fn vcleq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } #[doc = "Compare signed less than or equal"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] @@ -6182,7 +6182,7 @@ pub fn vcle_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s8"))] @@ -6203,7 +6203,7 @@ pub fn vcleq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] @@ -6224,7 +6224,7 @@ pub fn vcle_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s16"))] @@ -6245,7 +6245,7 @@ pub fn vcleq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vcge.s32"))] @@ -6266,7 +6266,7 @@ pub fn vcle_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Compare signed less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.s32"))] @@ -6287,7 +6287,7 @@ pub fn vcleq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] @@ -6308,7 +6308,7 @@ pub fn vcle_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u8"))] @@ -6329,7 +6329,7 @@ pub fn vcleq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] @@ -6350,7 +6350,7 @@ pub fn vcle_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u16"))] @@ -6371,7 +6371,7 @@ pub fn vcleq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcle_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] @@ -6392,7 +6392,7 @@ pub fn vcle_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Compare unsigned less than or equal"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcleq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcge.u32"))] @@ -6413,7 +6413,7 @@ pub fn vcleq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclez_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcle.f16"))] #[cfg_attr( @@ -6436,7 +6436,7 @@ pub fn vclez_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare less than or equal to zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclezq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vcle.f16"))] #[cfg_attr( @@ -6459,7 +6459,7 @@ pub fn vclezq_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] @@ -6488,7 +6488,7 @@ pub fn vcls_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s8"))] @@ -6517,7 +6517,7 @@ pub fn vclsq_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] @@ -6546,7 +6546,7 @@ pub fn vcls_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s16"))] @@ -6575,7 +6575,7 @@ pub fn vclsq_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] @@ -6604,7 +6604,7 @@ pub fn vcls_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcls.s32"))] @@ -6633,7 +6633,7 @@ pub fn vclsq_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] @@ -6654,7 +6654,7 @@ pub fn vcls_u8(a: uint8x8_t) -> int8x8_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] @@ -6675,7 +6675,7 @@ pub fn vclsq_u8(a: uint8x16_t) -> int8x16_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] @@ -6696,7 +6696,7 @@ pub fn vcls_u16(a: uint16x4_t) -> int16x4_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u16)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] @@ -6717,7 +6717,7 @@ pub fn vclsq_u16(a: uint16x8_t) -> int16x8_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcls_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] @@ -6738,7 +6738,7 @@ pub fn vcls_u32(a: uint32x2_t) -> int32x2_t { } #[doc = "Count leading sign bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclsq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcls))] @@ -6759,7 +6759,7 @@ pub fn vclsq_u32(a: uint32x4_t) -> int32x4_t { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( @@ -6781,7 +6781,7 @@ pub fn vclt_f16(a: float16x4_t, b: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f16"))] #[cfg_attr( @@ -6803,7 +6803,7 @@ pub fn vcltq_f16(a: float16x8_t, b: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] @@ -6824,7 +6824,7 @@ pub fn vclt_f32(a: float32x2_t, b: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.f32"))] @@ -6845,7 +6845,7 @@ pub fn vcltq_f32(a: float32x4_t, b: float32x4_t) -> uint32x4_t { } #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] @@ -6866,7 +6866,7 @@ pub fn vclt_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s8"))] @@ -6887,7 +6887,7 @@ pub fn vcltq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vcgt.s16"))] @@ -6908,7 +6908,7 @@ pub fn vclt_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s16"))] @@ -6929,7 +6929,7 @@ pub fn vcltq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] @@ -6950,7 +6950,7 @@ pub fn vclt_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Compare signed less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.s32"))] @@ -6971,7 +6971,7 @@ pub fn vcltq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] @@ -6992,7 +6992,7 @@ pub fn vclt_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u8)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u8"))] @@ -7013,7 +7013,7 @@ pub fn vcltq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] @@ -7034,7 +7034,7 @@ pub fn vclt_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u16"))] @@ -7055,7 +7055,7 @@ pub fn vcltq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclt_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] @@ -7076,7 +7076,7 @@ pub fn vclt_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Compare unsigned less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcgt.u32"))] @@ -7097,7 +7097,7 @@ pub fn vcltq_u32(a: uint32x4_t, b: 
uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltz_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclt.f16"))] #[cfg_attr( @@ -7120,7 +7120,7 @@ pub fn vcltz_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point compare less than"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcltzq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclt.f16"))] #[cfg_attr( @@ -7143,7 +7143,7 @@ pub fn vcltzq_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] @@ -7164,7 +7164,7 @@ pub fn vclz_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i8"))] @@ -7185,7 +7185,7 @@ pub fn vclzq_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vclz.i16"))] @@ -7206,7 +7206,7 @@ pub fn vclz_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i16"))] @@ -7227,7 +7227,7 @@ pub fn vclzq_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] @@ -7248,7 +7248,7 @@ pub fn vclz_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vclz.i32"))] @@ -7269,7 +7269,7 @@ pub fn vclzq_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7291,7 +7291,7 @@ pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] @@ -7317,7 +7317,7 @@ pub fn vclz_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7339,7 +7339,7 @@ pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7365,7 +7365,7 @@ pub fn vclzq_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7387,7 +7387,7 @@ pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7413,7 +7413,7 @@ pub fn vclz_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7435,7 +7435,7 @@ pub fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { } 
#[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7461,7 +7461,7 @@ pub fn vclzq_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7483,7 +7483,7 @@ pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclz_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7509,7 +7509,7 @@ pub fn vclz_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7531,7 +7531,7 @@ pub fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Count leading zero bits"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vclzq_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7562,7 +7562,7 @@ pub fn vclzq_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Population count per byte."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] @@ -7583,7 +7583,7 @@ pub fn vcnt_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcnt))] @@ -7604,7 +7604,7 @@ pub fn vcntq_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7626,7 +7626,7 @@ pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7652,7 +7652,7 @@ pub fn vcnt_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7674,7 +7674,7 @@ pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Population count per byte."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7705,7 +7705,7 @@ pub fn vcntq_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7727,7 +7727,7 @@ pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { } #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcnt_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7753,7 +7753,7 @@ pub fn vcnt_p8(a: poly8x8_t) -> poly8x8_t { } #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7775,7 +7775,7 @@ pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { } #[doc = "Population count per byte."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcntq_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -7806,7 +7806,7 @@ pub fn vcntq_p8(a: poly8x16_t) -> poly8x16_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] @@ -7825,7 +7825,7 @@ pub fn vcombine_f16(a: float16x4_t, b: float16x4_t) -> float16x8_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7842,7 +7842,7 @@ pub fn vcombine_f32(a: float32x2_t, b: float32x2_t) -> float32x4_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7859,7 +7859,7 @@ pub fn vcombine_s8(a: int8x8_t, b: int8x8_t) -> int8x16_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7876,7 +7876,7 @@ pub fn vcombine_s16(a: int16x4_t, b: int16x4_t) -> int16x8_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7893,7 +7893,7 @@ pub fn vcombine_s32(a: int32x2_t, b: int32x2_t) -> int32x4_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7910,7 +7910,7 @@ pub fn vcombine_s64(a: int64x1_t, b: int64x1_t) -> int64x2_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7927,7 +7927,7 @@ pub fn vcombine_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x16_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7944,7 +7944,7 @@ pub fn vcombine_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x8_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7961,7 +7961,7 @@ pub fn 
vcombine_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x4_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7978,7 +7978,7 @@ pub fn vcombine_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x2_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -7995,7 +7995,7 @@ pub fn vcombine_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x16_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8012,7 +8012,7 @@ pub fn vcombine_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x8_t { } #[doc = "Join two smaller vectors into a single larger vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcombine_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8029,7 +8029,7 @@ pub fn vcombine_p64(a: poly64x1_t, b: poly64x1_t) -> poly64x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8053,7 +8053,7 @@ pub fn vcreate_f16(a: u64) -> float16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8080,7 +8080,7 @@ pub fn vcreate_f16(a: u64) -> float16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8102,7 +8102,7 @@ pub fn vcreate_f32(a: u64) -> float32x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8127,7 +8127,7 @@ pub fn vcreate_f32(a: u64) -> float32x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8149,7 +8149,7 @@ pub fn vcreate_s8(a: u64) -> int8x8_t { } #[doc = "Insert vector element from another vector element"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8174,7 +8174,7 @@ pub fn vcreate_s8(a: u64) -> int8x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8196,7 +8196,7 @@ pub fn vcreate_s16(a: u64) -> int16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8221,7 +8221,7 @@ pub fn vcreate_s16(a: u64) -> int16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8243,7 +8243,7 @@ pub fn vcreate_s32(a: u64) -> int32x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8268,7 +8268,7 @@ pub fn vcreate_s32(a: u64) -> int32x2_t { } #[doc = "Insert vector element from another vector 
element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8289,7 +8289,7 @@ pub fn vcreate_s64(a: u64) -> int64x1_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8311,7 +8311,7 @@ pub fn vcreate_u8(a: u64) -> uint8x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8336,7 +8336,7 @@ pub fn vcreate_u8(a: u64) -> uint8x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8358,7 +8358,7 @@ pub fn vcreate_u16(a: u64) -> uint16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8383,7 +8383,7 @@ pub fn vcreate_u16(a: u64) -> uint16x4_t { } #[doc = 
"Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8405,7 +8405,7 @@ pub fn vcreate_u32(a: u64) -> uint32x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8430,7 +8430,7 @@ pub fn vcreate_u32(a: u64) -> uint32x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8451,7 +8451,7 @@ pub fn vcreate_u64(a: u64) -> uint64x1_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8473,7 +8473,7 @@ pub fn vcreate_p8(a: u64) -> poly8x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8498,7 +8498,7 @@ pub fn 
vcreate_p8(a: u64) -> poly8x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8520,7 +8520,7 @@ pub fn vcreate_p16(a: u64) -> poly16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -8545,7 +8545,7 @@ pub fn vcreate_p16(a: u64) -> poly16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcreate_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -8566,7 +8566,7 @@ pub fn vcreate_p64(a: u64) -> poly64x1_t { } #[doc = "Floating-point convert to lower precision narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f16_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -8589,7 +8589,7 @@ pub fn vcvt_f16_f32(a: float32x4_t) -> float16x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f16_s16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( @@ -8611,7 +8611,7 @@ pub fn vcvt_f16_s16(a: int16x4_t) -> float16x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f16_s16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( @@ -8633,7 +8633,7 @@ pub fn vcvtq_f16_s16(a: int16x8_t) -> float16x8_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f16_u16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( @@ -8655,7 +8655,7 @@ pub fn vcvt_f16_u16(a: uint16x4_t) -> float16x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f16_u16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( @@ -8677,7 +8677,7 @@ pub fn vcvtq_f16_u16(a: uint16x8_t) -> float16x8_t { } #[doc = "Floating-point convert to higher precision long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -8700,7 +8700,7 @@ pub fn vcvt_f32_f16(a: float16x4_t) -> float32x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_s32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -8721,7 +8721,7 @@ pub fn vcvt_f32_s32(a: int32x2_t) -> float32x2_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -8742,7 +8742,7 @@ pub fn vcvtq_f32_s32(a: int32x4_t) -> float32x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_f32_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -8763,7 +8763,7 @@ pub fn vcvt_f32_u32(a: uint32x2_t) -> float32x2_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_f32_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -8784,7 +8784,7 @@ pub fn vcvtq_f32_u32(a: uint32x4_t) -> float32x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f16_s16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( @@ -8819,7 +8819,7 @@ pub fn vcvt_n_f16_s16(a: int16x4_t) -> float16x4_t { } #[doc = 
"Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f16_s16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( @@ -8854,7 +8854,7 @@ pub fn vcvtq_n_f16_s16(a: int16x8_t) -> float16x8_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f16_u16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( @@ -8889,7 +8889,7 @@ pub fn vcvt_n_f16_u16(a: uint16x4_t) -> float16x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f16_u16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( @@ -8924,7 +8924,7 @@ pub fn vcvtq_n_f16_u16(a: uint16x8_t) -> float16x8_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] @@ -8943,7 +8943,7 @@ pub fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] @@ -8962,7 +8962,7 @@ pub fn 
vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(scvtf, N = 2))] @@ -8981,7 +8981,7 @@ pub fn vcvt_n_f32_s32(a: int32x2_t) -> float32x2_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(scvtf, N = 2))] @@ -9000,7 +9000,7 @@ pub fn vcvtq_n_f32_s32(a: int32x4_t) -> float32x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] @@ -9019,7 +9019,7 @@ pub fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] @@ -9038,7 +9038,7 @@ pub fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { } #[doc = "Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_f32_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] @@ -9057,7 +9057,7 @@ pub fn vcvt_n_f32_u32(a: uint32x2_t) -> float32x2_t { } #[doc = 
"Fixed-point convert to floating-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_f32_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ucvtf, N = 2))] @@ -9076,7 +9076,7 @@ pub fn vcvtq_n_f32_u32(a: uint32x4_t) -> float32x4_t { } #[doc = "Floating-point convert to signed fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( @@ -9111,7 +9111,7 @@ pub fn vcvt_n_s16_f16(a: float16x4_t) -> int16x4_t { } #[doc = "Floating-point convert to signed fixed-point"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( @@ -9146,7 +9146,7 @@ pub fn vcvtq_n_s16_f16(a: float16x8_t) -> int16x8_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] @@ -9165,7 +9165,7 @@ pub fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] @@ -9184,7 +9184,7 @@ 
pub fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] @@ -9203,7 +9203,7 @@ pub fn vcvt_n_s32_f32(a: float32x2_t) -> int32x2_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(fcvtzs, N = 2))] @@ -9222,7 +9222,7 @@ pub fn vcvtq_n_s32_f32(a: float32x4_t) -> int32x4_t { } #[doc = "Fixed-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( @@ -9257,7 +9257,7 @@ pub fn vcvt_n_u16_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Fixed-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vcvt", N = 1))] #[cfg_attr( @@ -9292,7 +9292,7 @@ pub fn vcvtq_n_u16_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] -#[inline(always)] +#[inline] 
#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] @@ -9311,7 +9311,7 @@ pub fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vcvt, N = 2))] @@ -9330,7 +9330,7 @@ pub fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_n_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] @@ -9349,7 +9349,7 @@ pub fn vcvt_n_u32_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point convert to fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_n_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(fcvtzu, N = 2))] @@ -9368,7 +9368,7 @@ pub fn vcvtq_n_u32_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( @@ -9390,7 +9390,7 @@ pub fn vcvt_s16_f16(a: float16x4_t) -> int16x4_t { } #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( @@ -9412,7 +9412,7 @@ pub fn vcvtq_s16_f16(a: float16x8_t) -> int16x8_t { } #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -9441,7 +9441,7 @@ pub fn vcvt_s32_f32(a: float32x2_t) -> int32x2_t { } #[doc = "Floating-point convert to signed fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_s32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -9470,7 +9470,7 @@ pub fn vcvtq_s32_f32(a: float32x4_t) -> int32x4_t { } #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( @@ -9492,7 +9492,7 @@ pub fn vcvt_u16_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u16_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] #[cfg_attr( @@ -9514,7 +9514,7 @@ pub fn vcvtq_u16_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvt_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -9543,7 +9543,7 @@ pub fn vcvt_u32_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Floating-point convert to unsigned fixed-point, rounding toward zero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vcvtq_u32_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vcvt))] @@ -9572,7 +9572,7 @@ pub fn vcvtq_u32_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9600,7 +9600,7 @@ pub fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9632,7 +9632,7 @@ pub fn vdot_lane_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9661,7 +9661,7 @@ pub fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int8x8_t) } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9695,7 +9695,7 @@ pub fn vdotq_lane_s32(a: int32x4_t, b: int8x16_t, c: int8x8_t) } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9723,7 +9723,7 @@ pub fn vdot_lane_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_lane_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9755,7 +9755,7 @@ pub fn vdot_lane_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9784,7 +9784,7 @@ pub fn vdotq_lane_u32(a: uint32x4_t, b: 
uint8x16_t, c: uint8x8_ } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_lane_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9818,7 +9818,7 @@ pub fn vdotq_lane_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x8_ } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9839,7 +9839,7 @@ pub fn vdot_laneq_s32(a: int32x2_t, b: int8x8_t, c: int8x16_t) } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9865,7 +9865,7 @@ pub fn vdot_laneq_s32(a: int32x2_t, b: int8x8_t, c: int8x16_t) } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9887,7 +9887,7 @@ pub fn vdotq_laneq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] 
#[target_feature(enable = "neon,dotprod")] @@ -9915,7 +9915,7 @@ pub fn vdotq_laneq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9936,7 +9936,7 @@ pub fn vdot_laneq_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x16_ } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_laneq_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9962,7 +9962,7 @@ pub fn vdot_laneq_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x16_ } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -9984,7 +9984,7 @@ pub fn vdotq_laneq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x1 } #[doc = "Dot product arithmetic (indexed)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_laneq_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] @@ -10012,7 +10012,7 @@ pub fn vdotq_laneq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x1 } #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_s32)"] -#[inline(always)] +#[inline] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] @@ -10041,7 +10041,7 @@ pub fn vdot_s32(a: int32x2_t, b: int8x8_t, c: int8x8_t) -> int32x2_t { } #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_s32)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsdot))] @@ -10070,7 +10070,7 @@ pub fn vdotq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { } #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdot_u32)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] @@ -10099,7 +10099,7 @@ pub fn vdot_u32(a: uint32x2_t, b: uint8x8_t, c: uint8x8_t) -> uint32x2_t { } #[doc = "Dot product arithmetic (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdotq_u32)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,dotprod")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vudot))] @@ -10128,7 +10128,7 @@ pub fn vdotq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vdup.16", N = 2))] @@ -10153,7 +10153,7 @@ pub fn vdup_lane_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] @@ -10178,7 +10178,7 @@ pub fn vdupq_lane_f16(a: float16x4_t) -> float16x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -10201,7 +10201,7 @@ pub fn vdup_lane_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -10224,7 +10224,7 @@ pub fn vdup_lane_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -10247,7 +10247,7 @@ pub fn vdup_lane_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -10270,7 +10270,7 @@ pub fn vdupq_lane_f32(a: float32x2_t) -> float32x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -10293,7 +10293,7 @@ pub fn vdupq_lane_s32(a: int32x2_t) -> int32x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 1))] @@ -10316,7 +10316,7 @@ pub fn vdupq_lane_u32(a: uint32x2_t) -> uint32x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] @@ -10339,7 +10339,7 @@ pub fn vdup_lane_p16(a: poly16x4_t) -> poly16x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] @@ -10362,7 +10362,7 @@ pub fn vdup_lane_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] @@ -10385,7 +10385,7 @@ pub fn vdup_lane_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] @@ -10408,7 +10408,7 @@ pub fn vdupq_lane_p16(a: poly16x4_t) -> poly16x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] @@ -10431,7 +10431,7 @@ pub fn vdupq_lane_s16(a: int16x4_t) -> int16x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 2))] @@ -10454,7 +10454,7 @@ pub fn vdupq_lane_u16(a: uint16x4_t) -> uint16x8_t { } #[doc = "Set 
all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] @@ -10477,7 +10477,7 @@ pub fn vdup_lane_p8(a: poly8x8_t) -> poly8x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] @@ -10500,7 +10500,7 @@ pub fn vdup_lane_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] @@ -10523,7 +10523,7 @@ pub fn vdup_lane_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] @@ -10546,7 +10546,7 @@ pub fn vdupq_lane_p8(a: poly8x8_t) -> poly8x16_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] @@ -10569,7 +10569,7 @@ pub fn vdupq_lane_s8(a: int8x8_t) -> int8x16_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 4))] @@ -10592,7 +10592,7 @@ pub fn vdupq_lane_u8(a: uint8x8_t) -> uint8x16_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] @@ -10615,7 +10615,7 @@ pub fn vdup_lane_s64(a: int64x1_t) -> int64x1_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_lane_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] @@ -10638,7 +10638,7 @@ pub fn vdup_lane_u64(a: uint64x1_t) -> uint64x1_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] @@ -10663,7 +10663,7 @@ pub fn vdup_laneq_f16(a: float16x8_t) -> float16x4_t { } 
#[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] @@ -10688,7 +10688,7 @@ pub fn vdupq_laneq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] @@ -10711,7 +10711,7 @@ pub fn vdup_laneq_f32(a: float32x4_t) -> float32x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] @@ -10734,7 +10734,7 @@ pub fn vdup_laneq_s32(a: int32x4_t) -> int32x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] @@ -10757,7 +10757,7 @@ pub fn vdup_laneq_u32(a: uint32x4_t) -> uint32x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_f32)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] @@ -10780,7 +10780,7 @@ pub fn vdupq_laneq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] @@ -10803,7 +10803,7 @@ pub fn vdupq_laneq_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32", N = 2))] @@ -10826,7 +10826,7 @@ pub fn vdupq_laneq_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] @@ -10849,7 +10849,7 @@ pub fn vdup_laneq_p16(a: poly16x8_t) -> poly16x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] @@ 
-10872,7 +10872,7 @@ pub fn vdup_laneq_s16(a: int16x8_t) -> int16x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] @@ -10895,7 +10895,7 @@ pub fn vdup_laneq_u16(a: uint16x8_t) -> uint16x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] @@ -10918,7 +10918,7 @@ pub fn vdupq_laneq_p16(a: poly16x8_t) -> poly16x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] @@ -10941,7 +10941,7 @@ pub fn vdupq_laneq_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16", N = 4))] @@ -10964,7 +10964,7 @@ pub fn vdupq_laneq_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] @@ -10987,7 +10987,7 @@ pub fn vdup_laneq_p8(a: poly8x16_t) -> poly8x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] @@ -11010,7 +11010,7 @@ pub fn vdup_laneq_s8(a: int8x16_t) -> int8x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] @@ -11033,7 +11033,7 @@ pub fn vdup_laneq_u8(a: uint8x16_t) -> uint8x8_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] @@ -11056,7 +11056,7 @@ pub fn vdupq_laneq_p8(a: poly8x16_t) -> poly8x16_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] @@ -11079,7 +11079,7 @@ pub fn vdupq_laneq_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8", N = 8))] @@ -11102,7 +11102,7 @@ pub fn vdupq_laneq_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] @@ -11125,7 +11125,7 @@ pub fn vdup_laneq_s64(a: int64x2_t) -> int64x1_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_laneq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] @@ -11148,7 +11148,7 @@ pub fn vdup_laneq_u64(a: uint64x2_t) -> uint64x1_t { } #[doc = "Create a new vector with all lanes set to a value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -11164,7 +11164,7 @@ pub fn vdup_n_f16(a: f16) -> float16x4_t { } #[doc = "Create a new vector with 
all lanes set to a value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -11180,7 +11180,7 @@ pub fn vdupq_n_f16(a: f16) -> float16x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -11201,7 +11201,7 @@ pub fn vdup_n_f32(value: f32) -> float32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -11222,7 +11222,7 @@ pub fn vdup_n_p16(value: p16) -> poly16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -11243,7 +11243,7 @@ pub fn vdup_n_p8(value: p8) -> poly8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -11264,7 +11264,7 @@ pub fn vdup_n_s16(value: i16) -> int16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -11285,7 +11285,7 @@ pub fn vdup_n_s32(value: i32) -> int32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -11306,7 +11306,7 @@ pub fn vdup_n_s64(value: i64) -> int64x1_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -11327,7 +11327,7 @@ pub fn vdup_n_s8(value: i8) -> int8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -11348,7 +11348,7 @@ pub fn vdup_n_u16(value: u16) -> uint16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -11369,7 +11369,7 @@ pub fn vdup_n_u32(value: u32) -> uint32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -11390,7 +11390,7 @@ pub fn vdup_n_u64(value: u64) -> uint64x1_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -11411,7 +11411,7 @@ pub fn vdup_n_u8(value: u8) -> uint8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -11432,7 +11432,7 @@ pub fn vdupq_n_f32(value: f32) -> float32x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr("vdup.16"))] @@ -11453,7 +11453,7 @@ pub fn vdupq_n_p16(value: p16) -> poly16x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -11474,7 +11474,7 @@ pub fn vdupq_n_p8(value: p8) -> poly8x16_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -11495,7 +11495,7 @@ pub fn vdupq_n_s16(value: i16) -> int16x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -11516,7 +11516,7 @@ pub fn vdupq_n_s32(value: i32) -> int32x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -11537,7 +11537,7 @@ pub fn vdupq_n_s64(value: i64) -> int64x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -11558,7 +11558,7 @@ pub fn vdupq_n_s8(value: i8) -> int8x16_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -11579,7 +11579,7 @@ pub fn vdupq_n_u16(value: u16) -> uint16x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -11600,7 +11600,7 @@ pub fn vdupq_n_u32(value: u32) -> uint32x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -11621,7 +11621,7 @@ pub fn vdupq_n_u64(value: u64) -> uint64x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -11642,7 +11642,7 @@ pub fn vdupq_n_u8(value: u8) -> uint8x16_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdup_n_f32_vfp4)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -11663,7 +11663,7 @@ fn vdup_n_f32_vfp4(value: f32) -> float32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_n_f32_vfp4)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -11684,7 +11684,7 @@ fn vdupq_n_f32_vfp4(value: f32) -> float32x4_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] @@ -11707,7 +11707,7 @@ pub fn vdupq_lane_s64(a: int64x1_t) -> int64x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_lane_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 0))] @@ -11730,7 +11730,7 @@ pub fn vdupq_lane_u64(a: uint64x1_t) -> uint64x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] @@ -11753,7 +11753,7 @@ pub fn vdupq_laneq_s64(a: int64x2_t) -> int64x2_t { } #[doc = "Set all vector lanes to the same value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vdupq_laneq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] @@ -11776,7 +11776,7 @@ pub fn vdupq_laneq_u64(a: uint64x2_t) -> uint64x2_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11797,7 +11797,7 @@ pub fn veor_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11818,7 +11818,7 @@ pub fn veorq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11839,7 +11839,7 @@ pub fn veor_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11860,7 +11860,7 @@ pub fn veorq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11881,7 +11881,7 @@ pub fn veor_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11902,7 +11902,7 @@ pub fn veorq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11923,7 +11923,7 @@ pub fn veor_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11944,7 +11944,7 @@ pub fn veorq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11965,7 +11965,7 @@ pub fn veor_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -11986,7 +11986,7 @@ pub fn veorq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -12007,7 +12007,7 @@ pub fn veor_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(veor))] @@ -12028,7 +12028,7 @@ pub fn veorq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -12049,7 +12049,7 @@ pub fn veor_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -12070,7 +12070,7 @@ pub fn veorq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veor_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -12091,7 +12091,7 @@ pub fn veor_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Vector bitwise exclusive or (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/veorq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(veor))] @@ -12112,7 +12112,7 @@ pub fn veorq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] #[cfg_attr( @@ -12144,7 +12144,7 @@ pub fn vext_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] @@ -12173,7 +12173,7 @@ pub fn vext_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] @@ -12202,7 +12202,7 @@ pub fn vext_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 1))] @@ -12233,7 +12233,7 @@ pub fn vext_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] @@ -12258,7 +12258,7 @@ pub unsafe fn vext_s64(a: int64x1_t, _b: int64x1_t) -> int64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, N = 0))] @@ -12281,7 +12281,7 @@ pub unsafe fn vext_u64(a: uint64x1_t, _b: uint64x1_t) -> uint64x1_ } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] @@ -12316,7 +12316,7 @@ pub fn vext_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] @@ -12351,7 +12351,7 @@ pub fn vextq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] @@ -12386,7 +12386,7 @@ pub fn vext_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = 
"Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] @@ -12421,7 +12421,7 @@ pub fn vextq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] @@ -12456,7 +12456,7 @@ pub fn vext_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] @@ -12491,7 +12491,7 @@ pub fn vextq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 7))] #[cfg_attr( @@ -12527,7 +12527,7 @@ pub fn vextq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] @@ -12558,7 +12558,7 @@ pub fn vextq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] @@ -12589,7 +12589,7 @@ pub fn vext_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] @@ -12620,7 +12620,7 @@ pub fn vextq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] @@ -12651,7 +12651,7 @@ pub fn vext_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] @@ -12682,7 +12682,7 @@ pub fn vextq_u32(a: uint32x4_t, b: 
uint32x4_t) -> uint32x4_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vext_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 3))] @@ -12713,7 +12713,7 @@ pub fn vext_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] @@ -12742,7 +12742,7 @@ pub fn vextq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmov, N = 1))] @@ -12771,7 +12771,7 @@ pub fn vextq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] @@ -12874,7 +12874,7 @@ pub fn vextq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_u8)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] @@ -12977,7 +12977,7 @@ pub fn vextq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Extract vector from pair of vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vextq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vext.8", N = 15))] @@ -13080,7 +13080,7 @@ pub fn vextq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( @@ -13102,7 +13102,7 @@ pub fn vfma_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { } #[doc = "Floating-point fused Multiply-Add to accumulator (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] #[cfg_attr( @@ -13124,7 +13124,7 @@ pub fn vfmaq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t } #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vfma))] @@ -13145,7 +13145,7 @@ pub fn vfma_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { } #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] @@ -13166,7 +13166,7 @@ pub fn vfmaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t } #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfma_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] @@ -13187,7 +13187,7 @@ pub fn vfma_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { } #[doc = "Floating-point fused Multiply-Add to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmaq_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfma))] @@ -13208,7 +13208,7 @@ pub fn vfmaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -13234,7 +13234,7 @@ pub fn 
vfms_f16(a: float16x4_t, b: float16x4_t, c: float16x4_t) -> float16x4_t { } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -13260,7 +13260,7 @@ pub fn vfmsq_f16(a: float16x8_t, b: float16x8_t, c: float16x8_t) -> float16x8_t } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] @@ -13284,7 +13284,7 @@ pub fn vfms_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { } #[doc = "Floating-point fused multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] @@ -13308,7 +13308,7 @@ pub fn vfmsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t } #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfms_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] @@ -13329,7 +13329,7 @@ pub fn vfms_n_f32(a: float32x2_t, b: float32x2_t, 
c: f32) -> float32x2_t { } #[doc = "Floating-point fused Multiply-subtract to accumulator(vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vfmsq_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "vfp4"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vfms))] @@ -13350,7 +13350,7 @@ pub fn vfmsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { } #[doc = "Duplicate vector element to vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] @@ -13369,7 +13369,7 @@ pub fn vget_high_f16(a: float16x8_t) -> float16x4_t { } #[doc = "Duplicate vector element to vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] @@ -13388,7 +13388,7 @@ pub fn vget_low_f16(a: float16x8_t) -> float16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13409,7 +13409,7 @@ pub fn vget_high_f32(a: float32x4_t) -> float32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13430,7 +13430,7 @@ pub fn vget_high_p16(a: poly16x8_t) -> poly16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13451,7 +13451,7 @@ pub fn vget_high_p8(a: poly8x16_t) -> poly8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13472,7 +13472,7 @@ pub fn vget_high_s16(a: int16x8_t) -> int16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13493,7 +13493,7 @@ pub fn vget_high_s32(a: int32x4_t) -> int32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13514,7 +13514,7 @@ pub fn vget_high_s8(a: int8x16_t) -> int8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13535,7 +13535,7 @@ pub fn vget_high_u16(a: uint16x8_t) -> uint16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13556,7 +13556,7 @@ pub fn vget_high_u32(a: uint32x4_t) -> uint32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13577,7 +13577,7 @@ pub fn vget_high_u8(a: uint8x16_t) -> uint8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13598,7 +13598,7 @@ pub fn vget_high_s64(a: int64x2_t) -> int64x1_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_high_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -13619,7 +13619,7 @@ pub fn vget_high_u64(a: uint64x2_t) -> uint64x1_t { } #[doc = "Duplicate vector element to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -13637,7 +13637,7 @@ pub fn vget_lane_f16(a: float16x4_t) -> f16 { } #[doc = "Duplicate vector element to scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -13655,7 +13655,7 @@ pub fn vgetq_lane_f16(a: float16x8_t) -> f16 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13674,7 +13674,7 @@ pub fn vget_lane_f32(v: float32x2_t) -> f32 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ 
-13693,7 +13693,7 @@ pub fn vget_lane_p16(v: poly16x4_t) -> p16 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13712,7 +13712,7 @@ pub fn vget_lane_p8(v: poly8x8_t) -> p8 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13731,7 +13731,7 @@ pub fn vget_lane_s16(v: int16x4_t) -> i16 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13750,7 +13750,7 @@ pub fn vget_lane_s32(v: int32x2_t) -> i32 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13769,7 +13769,7 @@ pub fn vget_lane_s8(v: int8x8_t) -> i8 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[rustc_legacy_const_generics(1)] @@ -13788,7 +13788,7 @@ pub fn vget_lane_u16(v: uint16x4_t) -> u16 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13807,7 +13807,7 @@ pub fn vget_lane_u32(v: uint32x2_t) -> u32 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13826,7 +13826,7 @@ pub fn vget_lane_u8(v: uint8x8_t) -> u8 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13845,7 +13845,7 @@ pub fn vgetq_lane_f32(v: float32x4_t) -> f32 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13864,7 +13864,7 @@ pub fn vgetq_lane_p16(v: poly16x8_t) -> p16 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13883,7 +13883,7 @@ pub fn vgetq_lane_p64(v: poly64x2_t) -> p64 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13902,7 +13902,7 @@ pub fn vgetq_lane_p8(v: poly8x16_t) -> p8 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13921,7 +13921,7 @@ pub fn vgetq_lane_s16(v: int16x8_t) -> i16 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13940,7 +13940,7 @@ pub fn vgetq_lane_s32(v: int32x4_t) -> i32 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13959,7 +13959,7 @@ pub fn vgetq_lane_s64(v: int64x2_t) -> i64 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_s8)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13978,7 +13978,7 @@ pub fn vgetq_lane_s8(v: int8x16_t) -> i8 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -13997,7 +13997,7 @@ pub fn vgetq_lane_u16(v: uint16x8_t) -> u16 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -14016,7 +14016,7 @@ pub fn vgetq_lane_u32(v: uint32x4_t) -> u32 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -14035,7 +14035,7 @@ pub fn vgetq_lane_u64(v: uint64x2_t) -> u64 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vgetq_lane_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -14054,7 +14054,7 @@ pub fn vgetq_lane_u8(v: uint8x16_t) -> u8 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -14073,7 +14073,7 @@ pub fn vget_lane_p64(v: poly64x1_t) -> p64 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -14092,7 +14092,7 @@ pub fn vget_lane_s64(v: int64x1_t) -> i64 { } #[doc = "Move vector element to general-purpose register"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_lane_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(1)] @@ -14111,7 +14111,7 @@ pub fn vget_lane_u64(v: uint64x1_t) -> u64 { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14128,7 +14128,7 @@ pub fn vget_low_f32(a: float32x4_t) -> float32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14145,7 +14145,7 @@ pub fn vget_low_p16(a: poly16x8_t) -> poly16x4_t { } #[doc = "Duplicate 
vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14162,7 +14162,7 @@ pub fn vget_low_p8(a: poly8x16_t) -> poly8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14179,7 +14179,7 @@ pub fn vget_low_s16(a: int16x8_t) -> int16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14196,7 +14196,7 @@ pub fn vget_low_s32(a: int32x4_t) -> int32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14213,7 +14213,7 @@ pub fn vget_low_s8(a: int8x16_t) -> int8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14230,7 +14230,7 @@ pub fn 
vget_low_u16(a: uint16x8_t) -> uint16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14247,7 +14247,7 @@ pub fn vget_low_u32(a: uint32x4_t) -> uint32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14264,7 +14264,7 @@ pub fn vget_low_u8(a: uint8x16_t) -> uint8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14281,7 +14281,7 @@ pub fn vget_low_s64(a: int64x2_t) -> int64x1_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vget_low_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(nop))] @@ -14298,7 +14298,7 @@ pub fn vget_low_u64(a: uint64x2_t) -> uint64x1_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vhadd.s8"))] @@ -14327,7 +14327,7 @@ pub fn vhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s8"))] @@ -14356,7 +14356,7 @@ pub fn vhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] @@ -14385,7 +14385,7 @@ pub fn vhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s16"))] @@ -14414,7 +14414,7 @@ pub fn vhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] @@ -14443,7 +14443,7 @@ pub fn vhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.s32"))] @@ -14472,7 +14472,7 @@ pub fn vhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] @@ -14501,7 +14501,7 @@ pub fn vhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u8"))] @@ -14530,7 +14530,7 @@ pub fn vhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] @@ -14559,7 +14559,7 @@ pub fn vhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u16"))] @@ -14588,7 +14588,7 @@ pub fn vhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Halving add"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhadd_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] @@ -14617,7 +14617,7 @@ pub fn vhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhaddq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhadd.u32"))] @@ -14646,7 +14646,7 @@ pub fn vhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] @@ -14675,7 +14675,7 @@ pub fn vhsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s16"))] @@ -14704,7 +14704,7 @@ pub fn vhsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vhsub.s32"))] @@ -14733,7 +14733,7 @@ pub fn vhsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s32"))] @@ -14762,7 +14762,7 @@ pub fn vhsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] @@ -14791,7 +14791,7 @@ pub fn vhsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.s8"))] @@ -14820,7 +14820,7 @@ pub fn vhsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] @@ -14849,7 +14849,7 @@ pub fn vhsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u8)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u8"))] @@ -14878,7 +14878,7 @@ pub fn vhsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] @@ -14907,7 +14907,7 @@ pub fn vhsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u16"))] @@ -14936,7 +14936,7 @@ pub fn vhsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsub_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] @@ -14965,7 +14965,7 @@ pub fn vhsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Signed halving subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vhsubq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vhsub.u32"))] @@ -14996,7 +14996,7 @@ pub fn vhsubq_u32(a: 
uint32x4_t, b: uint32x4_t) -> uint32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] @@ -15015,7 +15015,7 @@ pub unsafe fn vld1_dup_f16(ptr: *const f16) -> float16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] @@ -15034,7 +15034,7 @@ pub unsafe fn vld1q_dup_f16(ptr: *const f16) -> float16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] @@ -15057,7 +15057,7 @@ pub unsafe fn vld1_dup_f32(ptr: *const f32) -> float32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] @@ -15080,7 +15080,7 @@ pub unsafe fn vld1_dup_p16(ptr: *const p16) -> poly16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p8)"] #[doc = "## Safety"] #[doc = " 
* Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] @@ -15103,7 +15103,7 @@ pub unsafe fn vld1_dup_p8(ptr: *const p8) -> poly8x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] @@ -15126,7 +15126,7 @@ pub unsafe fn vld1_dup_s16(ptr: *const i16) -> int16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] @@ -15149,7 +15149,7 @@ pub unsafe fn vld1_dup_s32(ptr: *const i32) -> int32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] @@ -15172,7 +15172,7 @@ pub unsafe fn vld1_dup_s8(ptr: *const i8) -> int8x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vld1.16"))] @@ -15195,7 +15195,7 @@ pub unsafe fn vld1_dup_u16(ptr: *const u16) -> uint16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] @@ -15218,7 +15218,7 @@ pub unsafe fn vld1_dup_u32(ptr: *const u32) -> uint32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] @@ -15241,7 +15241,7 @@ pub unsafe fn vld1_dup_u8(ptr: *const u8) -> uint8x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] @@ -15264,7 +15264,7 @@ pub unsafe fn vld1q_dup_f32(ptr: *const f32) -> float32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] @@ -15287,7 +15287,7 @@ pub unsafe fn vld1q_dup_p16(ptr: *const p16) -> poly16x8_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] @@ -15310,7 +15310,7 @@ pub unsafe fn vld1q_dup_p8(ptr: *const p8) -> poly8x16_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] @@ -15333,7 +15333,7 @@ pub unsafe fn vld1q_dup_s16(ptr: *const i16) -> int16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] @@ -15356,7 +15356,7 @@ pub unsafe fn vld1q_dup_s32(ptr: *const i32) -> int32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] @@ -15379,7 +15379,7 @@ pub unsafe fn vld1q_dup_s64(ptr: *const i64) -> int64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] @@ -15402,7 +15402,7 @@ pub unsafe fn vld1q_dup_s8(ptr: *const i8) -> int8x16_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.16"))] @@ -15425,7 +15425,7 @@ pub unsafe fn vld1q_dup_u16(ptr: *const u16) -> uint16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.32"))] @@ -15448,7 +15448,7 @@ pub unsafe fn vld1q_dup_u32(ptr: *const u32) -> uint32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vldr"))] @@ -15471,7 +15471,7 @@ pub unsafe fn vld1q_dup_u64(ptr: *const u64) -> uint64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vld1.8"))] @@ -15494,7 
+15494,7 @@ pub unsafe fn vld1q_dup_u8(ptr: *const u8) -> uint8x16_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] @@ -15526,7 +15526,7 @@ pub unsafe fn vld1_dup_p64(ptr: *const p64) -> poly64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] @@ -15558,7 +15558,7 @@ pub unsafe fn vld1_dup_s64(ptr: *const i64) -> int64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] @@ -15590,7 +15590,7 @@ pub unsafe fn vld1_dup_u64(ptr: *const u64) -> uint64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -15608,7 +15608,7 @@ pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -15627,7 +15627,7 @@ pub unsafe fn vld1_f16(ptr: *const f16) -> float16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -15645,7 +15645,7 @@ pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -15664,7 +15664,7 @@ pub unsafe fn vld1q_f16(ptr: *const f16) -> float16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] @@ -15682,7 +15682,7 @@ pub unsafe fn vld1_f16_x2(a: *const f16) -> float16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] @@ -15700,7 +15700,7 @@ pub unsafe fn vld1_f16_x3(a: *const f16) -> float16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic 
unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] @@ -15718,7 +15718,7 @@ pub unsafe fn vld1_f16_x4(a: *const f16) -> float16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] @@ -15736,7 +15736,7 @@ pub unsafe fn vld1q_f16_x2(a: *const f16) -> float16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] @@ -15754,7 +15754,7 @@ pub unsafe fn vld1q_f16_x3(a: *const f16) -> float16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1))] @@ -15772,7 +15772,7 @@ pub unsafe fn vld1q_f16_x4(a: *const f16) -> float16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15785,7 +15785,7 @@ pub 
unsafe fn vld1_f32(ptr: *const f32) -> float32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15798,7 +15798,7 @@ pub unsafe fn vld1q_f32(ptr: *const f32) -> float32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15811,7 +15811,7 @@ pub unsafe fn vld1_u8(ptr: *const u8) -> uint8x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15824,7 +15824,7 @@ pub unsafe fn vld1q_u8(ptr: *const u8) -> uint8x16_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15837,7 +15837,7 @@ pub unsafe fn vld1_u16(ptr: *const u16) -> uint16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15850,7 +15850,7 @@ pub unsafe fn vld1q_u16(ptr: *const u16) -> uint16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15863,7 +15863,7 @@ pub unsafe fn vld1_u32(ptr: *const u32) -> uint32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15876,7 +15876,7 @@ pub unsafe fn vld1q_u32(ptr: *const u32) -> uint32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15889,7 +15889,7 @@ pub unsafe fn vld1_u64(ptr: *const u64) -> uint64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15902,7 +15902,7 @@ pub unsafe fn vld1q_u64(ptr: *const u64) -> uint64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15915,7 +15915,7 @@ pub unsafe fn vld1_p8(ptr: *const p8) -> poly8x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15928,7 +15928,7 @@ pub unsafe fn vld1q_p8(ptr: *const p8) -> poly8x16_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15941,7 +15941,7 @@ pub unsafe fn vld1_p16(ptr: *const p16) -> poly16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15954,7 +15954,7 @@ pub unsafe fn vld1q_p16(ptr: *const p16) -> poly16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,aes")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -15967,7 +15967,7 @@ pub unsafe fn vld1q_p64(ptr: *const p64) -> poly64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x2)"] #[doc = 
"## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -15990,7 +15990,7 @@ pub unsafe fn vld1_f32_x2(a: *const f32) -> float32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -16013,7 +16013,7 @@ pub unsafe fn vld1_f32_x3(a: *const f32) -> float32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -16036,7 +16036,7 @@ pub unsafe fn vld1_f32_x4(a: *const f32) -> float32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -16059,7 +16059,7 @@ pub unsafe fn vld1q_f32_x2(a: *const f32) -> float32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vld))] @@ -16082,7 +16082,7 @@ pub unsafe fn vld1q_f32_x3(a: *const f32) -> float32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -16105,7 +16105,7 @@ pub unsafe fn vld1q_f32_x4(a: *const f32) -> float32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1, LANE = 0))] @@ -16125,7 +16125,7 @@ pub unsafe fn vld1_lane_f16(ptr: *const f16, src: float16x4_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld1, LANE = 0))] @@ -16145,7 +16145,7 @@ pub unsafe fn vld1q_lane_f16(ptr: *const f16, src: float16x8_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16170,7 +16170,7 @@ pub unsafe fn vld1_lane_f32(ptr: *const f32, src: float32x2_t) #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16195,7 +16195,7 @@ pub unsafe fn vld1_lane_p16(ptr: *const p16, src: poly16x4_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16220,7 +16220,7 @@ pub unsafe fn vld1_lane_p8(ptr: *const p8, src: poly8x8_t) -> p #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16245,7 +16245,7 @@ pub unsafe fn vld1_lane_s16(ptr: *const i16, src: int16x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16270,7 +16270,7 @@ pub unsafe fn vld1_lane_s32(ptr: *const i32, src: int32x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[rustc_legacy_const_generics(2)] @@ -16295,7 +16295,7 @@ pub unsafe fn vld1_lane_s64(ptr: *const i64, src: int64x1_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16320,7 +16320,7 @@ pub unsafe fn vld1_lane_s8(ptr: *const i8, src: int8x8_t) -> in #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16345,7 +16345,7 @@ pub unsafe fn vld1_lane_u16(ptr: *const u16, src: uint16x4_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16370,7 +16370,7 @@ pub unsafe fn vld1_lane_u32(ptr: *const u32, src: uint32x2_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16395,7 +16395,7 @@ pub unsafe fn vld1_lane_u64(ptr: *const u64, src: uint64x1_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16420,7 +16420,7 @@ pub unsafe fn vld1_lane_u8(ptr: *const u8, src: uint8x8_t) -> u #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16445,7 +16445,7 @@ pub unsafe fn vld1q_lane_f32(ptr: *const f32, src: float32x4_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16470,7 +16470,7 @@ pub unsafe fn vld1q_lane_p16(ptr: *const p16, src: poly16x8_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16495,7 +16495,7 @@ pub unsafe fn vld1q_lane_p8(ptr: *const p8, src: poly8x16_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16520,7 +16520,7 @@ pub unsafe fn vld1q_lane_s16(ptr: *const i16, src: int16x8_t) - #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16545,7 +16545,7 @@ pub unsafe fn vld1q_lane_s32(ptr: *const i32, src: int32x4_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16570,7 +16570,7 @@ pub unsafe fn vld1q_lane_s64(ptr: *const i64, src: int64x2_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16595,7 +16595,7 @@ pub unsafe fn vld1q_lane_s8(ptr: *const i8, src: int8x16_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16620,7 +16620,7 @@ pub unsafe fn vld1q_lane_u16(ptr: *const u16, src: uint16x8_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[rustc_legacy_const_generics(2)] @@ -16645,7 +16645,7 @@ pub unsafe fn vld1q_lane_u32(ptr: *const u32, src: uint32x4_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16670,7 +16670,7 @@ pub unsafe fn vld1q_lane_u64(ptr: *const u64, src: uint64x2_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16695,7 +16695,7 @@ pub unsafe fn vld1q_lane_u8(ptr: *const u8, src: uint8x16_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16720,7 +16720,7 @@ pub unsafe fn vld1_lane_p64(ptr: *const p64, src: poly64x1_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[rustc_legacy_const_generics(2)] @@ -16745,7 +16745,7 @@ pub unsafe fn vld1q_lane_p64(ptr: *const p64, src: poly64x2_t) #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,aes")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -16763,7 +16763,7 @@ pub unsafe fn vld1_p64(ptr: *const p64) -> poly64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -16786,7 +16786,7 @@ pub unsafe fn vld1_p64_x2(a: *const p64) -> poly64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -16809,7 +16809,7 @@ pub unsafe fn vld1_p64_x3(a: *const p64) -> poly64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -16832,7 +16832,7 @@ pub unsafe fn vld1_p64_x4(a: *const p64) -> poly64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -16855,7 +16855,7 @@ pub 
unsafe fn vld1q_p64_x2(a: *const p64) -> poly64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -16878,7 +16878,7 @@ pub unsafe fn vld1q_p64_x3(a: *const p64) -> poly64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -16901,7 +16901,7 @@ pub unsafe fn vld1q_p64_x4(a: *const p64) -> poly64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -16914,7 +16914,7 @@ pub unsafe fn vld1_s8(ptr: *const i8) -> int8x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -16927,7 +16927,7 @@ pub unsafe fn vld1q_s8(ptr: *const i8) -> int8x16_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch 
= "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -16940,7 +16940,7 @@ pub unsafe fn vld1_s16(ptr: *const i16) -> int16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -16953,7 +16953,7 @@ pub unsafe fn vld1q_s16(ptr: *const i16) -> int16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -16966,7 +16966,7 @@ pub unsafe fn vld1_s32(ptr: *const i32) -> int32x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -16979,7 +16979,7 @@ pub unsafe fn vld1q_s32(ptr: *const i32) -> int32x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -16992,7 +16992,7 @@ pub unsafe fn vld1_s64(ptr: *const i64) -> int64x1_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64)"] #[doc = "## Safety"] #[doc = 
" * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -17005,7 +17005,7 @@ pub unsafe fn vld1q_s64(ptr: *const i64) -> int64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17028,7 +17028,7 @@ pub unsafe fn vld1_s8_x2(a: *const i8) -> int8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17051,7 +17051,7 @@ pub unsafe fn vld1_s8_x3(a: *const i8) -> int8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17074,7 +17074,7 @@ pub unsafe fn vld1_s8_x4(a: *const i8) -> int8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17097,7 +17097,7 @@ pub unsafe fn 
vld1q_s8_x2(a: *const i8) -> int8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17120,7 +17120,7 @@ pub unsafe fn vld1q_s8_x3(a: *const i8) -> int8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17143,7 +17143,7 @@ pub unsafe fn vld1q_s8_x4(a: *const i8) -> int8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17166,7 +17166,7 @@ pub unsafe fn vld1_s16_x2(a: *const i16) -> int16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17189,7 +17189,7 @@ pub unsafe fn vld1_s16_x3(a: *const i16) -> int16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17212,7 +17212,7 @@ pub unsafe fn vld1_s16_x4(a: *const i16) -> int16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17235,7 +17235,7 @@ pub unsafe fn vld1q_s16_x2(a: *const i16) -> int16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17258,7 +17258,7 @@ pub unsafe fn vld1q_s16_x3(a: *const i16) -> int16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17281,7 +17281,7 @@ pub unsafe fn vld1q_s16_x4(a: *const i16) -> int16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17304,7 +17304,7 
@@ pub unsafe fn vld1_s32_x2(a: *const i32) -> int32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17327,7 +17327,7 @@ pub unsafe fn vld1_s32_x3(a: *const i32) -> int32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17350,7 +17350,7 @@ pub unsafe fn vld1_s32_x4(a: *const i32) -> int32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17373,7 +17373,7 @@ pub unsafe fn vld1q_s32_x2(a: *const i32) -> int32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17396,7 +17396,7 @@ pub unsafe fn vld1q_s32_x3(a: *const i32) -> int32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon 
intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17419,7 +17419,7 @@ pub unsafe fn vld1q_s32_x4(a: *const i32) -> int32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17442,7 +17442,7 @@ pub unsafe fn vld1_s64_x2(a: *const i64) -> int64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17465,7 +17465,7 @@ pub unsafe fn vld1_s64_x3(a: *const i64) -> int64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17488,7 +17488,7 @@ pub unsafe fn vld1_s64_x4(a: *const i64) -> int64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ 
-17511,7 +17511,7 @@ pub unsafe fn vld1q_s64_x2(a: *const i64) -> int64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17534,7 +17534,7 @@ pub unsafe fn vld1q_s64_x3(a: *const i64) -> int64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17557,7 +17557,7 @@ pub unsafe fn vld1q_s64_x4(a: *const i64) -> int64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17580,7 +17580,7 @@ pub unsafe fn vld1_u8_x2(a: *const u8) -> uint8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17603,7 +17603,7 @@ pub unsafe fn vld1_u8_x3(a: *const u8) -> uint8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u8_x4)"] #[doc = "## Safety"] #[doc = 
" * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17626,7 +17626,7 @@ pub unsafe fn vld1_u8_x4(a: *const u8) -> uint8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17649,7 +17649,7 @@ pub unsafe fn vld1q_u8_x2(a: *const u8) -> uint8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17672,7 +17672,7 @@ pub unsafe fn vld1q_u8_x3(a: *const u8) -> uint8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17695,7 +17695,7 @@ pub unsafe fn vld1q_u8_x4(a: *const u8) -> uint8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] 
@@ -17718,7 +17718,7 @@ pub unsafe fn vld1_u16_x2(a: *const u16) -> uint16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17741,7 +17741,7 @@ pub unsafe fn vld1_u16_x3(a: *const u16) -> uint16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17764,7 +17764,7 @@ pub unsafe fn vld1_u16_x4(a: *const u16) -> uint16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17787,7 +17787,7 @@ pub unsafe fn vld1q_u16_x2(a: *const u16) -> uint16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17810,7 +17810,7 @@ pub unsafe fn vld1q_u16_x3(a: *const u16) -> uint16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u16_x4)"] #[doc = "## 
Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17833,7 +17833,7 @@ pub unsafe fn vld1q_u16_x4(a: *const u16) -> uint16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17856,7 +17856,7 @@ pub unsafe fn vld1_u32_x2(a: *const u32) -> uint32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17879,7 +17879,7 @@ pub unsafe fn vld1_u32_x3(a: *const u32) -> uint32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17902,7 +17902,7 @@ pub unsafe fn vld1_u32_x4(a: *const u32) -> uint32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vld))] @@ -17925,7 +17925,7 @@ pub unsafe fn vld1q_u32_x2(a: *const u32) -> uint32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17948,7 +17948,7 @@ pub unsafe fn vld1q_u32_x3(a: *const u32) -> uint32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17971,7 +17971,7 @@ pub unsafe fn vld1q_u32_x4(a: *const u32) -> uint32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -17994,7 +17994,7 @@ pub unsafe fn vld1_u64_x2(a: *const u64) -> uint64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18017,7 +18017,7 @@ pub unsafe fn vld1_u64_x3(a: *const u64) -> uint64x1x3_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18040,7 +18040,7 @@ pub unsafe fn vld1_u64_x4(a: *const u64) -> uint64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18063,7 +18063,7 @@ pub unsafe fn vld1q_u64_x2(a: *const u64) -> uint64x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18086,7 +18086,7 @@ pub unsafe fn vld1q_u64_x3(a: *const u64) -> uint64x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18109,7 +18109,7 @@ pub unsafe fn vld1q_u64_x4(a: *const u64) -> uint64x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18132,7 +18132,7 @@ pub unsafe fn vld1_p8_x2(a: *const p8) -> poly8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18155,7 +18155,7 @@ pub unsafe fn vld1_p8_x3(a: *const p8) -> poly8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18178,7 +18178,7 @@ pub unsafe fn vld1_p8_x4(a: *const p8) -> poly8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18201,7 +18201,7 @@ pub unsafe fn vld1q_p8_x2(a: *const p8) -> poly8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18224,7 +18224,7 @@ pub unsafe fn vld1q_p8_x3(a: *const p8) -> poly8x16x3_t { #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18247,7 +18247,7 @@ pub unsafe fn vld1q_p8_x4(a: *const p8) -> poly8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18270,7 +18270,7 @@ pub unsafe fn vld1_p16_x2(a: *const p16) -> poly16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18293,7 +18293,7 @@ pub unsafe fn vld1_p16_x3(a: *const p16) -> poly16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18316,7 +18316,7 @@ pub unsafe fn vld1_p16_x4(a: *const p16) -> poly16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18339,7 +18339,7 @@ pub unsafe fn vld1q_p16_x2(a: *const p16) -> poly16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18362,7 +18362,7 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld))] @@ -18381,7 +18381,7 @@ pub unsafe fn vld1q_p16_x3(a: *const p16) -> poly16x8x3_t { pub unsafe fn vld1q_p16_x4(a: *const p16) -> poly16x8x4_t { crate::ptr::read_unaligned(a.cast()) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18394,7 +18394,7 @@ unsafe fn vld1_v1i64(a: *const i8) -> int64x1_t { } _vld1_v1i64(a, ALIGN) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18407,7 +18407,7 @@ unsafe fn vld1_v2f32(a: *const i8) -> float32x2_t { } _vld1_v2f32(a, ALIGN) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18420,7 +18420,7 @@ unsafe fn vld1_v2i32(a: *const i8) -> int32x2_t { } _vld1_v2i32(a, ALIGN) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] 
#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18433,7 +18433,7 @@ unsafe fn vld1_v4i16(a: *const i8) -> int16x4_t { } _vld1_v4i16(a, ALIGN) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18446,7 +18446,7 @@ unsafe fn vld1_v8i8(a: *const i8) -> int8x8_t { } _vld1_v8i8(a, ALIGN) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18459,7 +18459,7 @@ unsafe fn vld1q_v16i8(a: *const i8) -> int8x16_t { } _vld1q_v16i8(a, ALIGN) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18472,7 +18472,7 @@ unsafe fn vld1q_v2i64(a: *const i8) -> int64x2_t { } _vld1q_v2i64(a, ALIGN) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18485,7 +18485,7 @@ unsafe fn vld1q_v4f32(a: *const i8) -> float32x4_t { } _vld1q_v4f32(a, ALIGN) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18498,7 +18498,7 @@ unsafe fn vld1q_v4i32(a: *const i8) -> int32x4_t { } _vld1q_v4i32(a, ALIGN) } -#[inline(always)] +#[inline] #[rustc_legacy_const_generics(1)] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] @@ -18511,7 +18511,7 @@ unsafe fn vld1q_v8i16(a: *const i8) -> int16x8_t { } _vld1q_v8i16(a, ALIGN) } -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -18525,7 +18525,7 @@ unsafe fn vld1_v4f16(a: *const i8, b: i32) -> float16x4_t { } _vld1_v4f16(a, b) } -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -18543,7 +18543,7 
@@ unsafe fn vld1q_v8f16(a: *const i8, b: i32) -> float16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld1q_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vldr))] @@ -18567,7 +18567,7 @@ pub unsafe fn vld1q_dup_p64(ptr: *const p64) -> poly64x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] @@ -18586,7 +18586,7 @@ pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] @@ -18605,7 +18605,7 @@ pub unsafe fn vld2q_dup_f16(a: *const f16) -> float16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -18629,7 +18629,7 @@ pub unsafe fn vld2_dup_f16(a: *const f16) -> float16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( 
@@ -18653,7 +18653,7 @@ pub unsafe fn vld2q_dup_f16(a: *const f16) -> float16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -18669,7 +18669,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -18685,7 +18685,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -18701,7 +18701,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -18717,7 +18717,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -18733,7 +18733,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -18749,7 +18749,7 @@ pub unsafe fn vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -18765,7 +18765,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -18781,7 +18781,7 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18800,7 +18800,7 @@ pub unsafe fn vld2_dup_f32(a: *const f32) -> float32x2x2_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18819,7 +18819,7 @@ pub unsafe fn vld2q_dup_f32(a: *const f32) -> float32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18838,7 +18838,7 @@ pub unsafe fn vld2_dup_s8(a: *const i8) -> int8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18857,7 +18857,7 @@ pub unsafe fn vld2q_dup_s8(a: *const i8) -> int8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18876,7 +18876,7 @@ pub unsafe fn vld2_dup_s16(a: *const i16) -> int16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18895,7 +18895,7 @@ pub unsafe fn 
vld2q_dup_s16(a: *const i16) -> int16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18914,7 +18914,7 @@ pub unsafe fn vld2_dup_s32(a: *const i32) -> int32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18933,7 +18933,7 @@ pub unsafe fn vld2q_dup_s32(a: *const i32) -> int32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -18956,7 +18956,7 @@ pub unsafe fn vld2_dup_p64(a: *const p64) -> poly64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -18972,7 +18972,7 @@ pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -18991,7 +18991,7 @@ pub unsafe fn vld2_dup_s64(a: *const i64) -> int64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -19014,7 +19014,7 @@ pub unsafe fn vld2_dup_u64(a: *const u64) -> uint64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19038,7 +19038,7 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19065,7 +19065,7 @@ pub unsafe fn vld2_dup_u8(a: *const u8) -> uint8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19089,7 +19089,7 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u8)"] #[doc = "## Safety"] 
#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19124,7 +19124,7 @@ pub unsafe fn vld2q_dup_u8(a: *const u8) -> uint8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19148,7 +19148,7 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19175,7 +19175,7 @@ pub unsafe fn vld2_dup_u16(a: *const u16) -> uint16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19199,7 +19199,7 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19226,7 +19226,7 @@ pub unsafe fn vld2q_dup_u16(a: *const u16) -> uint16x8x2_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19250,7 +19250,7 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19277,7 +19277,7 @@ pub unsafe fn vld2_dup_u32(a: *const u32) -> uint32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19301,7 +19301,7 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19328,7 +19328,7 @@ pub unsafe fn vld2q_dup_u32(a: *const u32) -> uint32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19352,7 
+19352,7 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19379,7 +19379,7 @@ pub unsafe fn vld2_dup_p8(a: *const p8) -> poly8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19403,7 +19403,7 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19438,7 +19438,7 @@ pub unsafe fn vld2q_dup_p8(a: *const p8) -> poly8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19462,7 +19462,7 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19489,7 +19489,7 @@ pub unsafe fn vld2_dup_p16(a: *const p16) -> poly16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19513,7 +19513,7 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -19540,7 +19540,7 @@ pub unsafe fn vld2q_dup_p16(a: *const p16) -> poly16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] @@ -19559,7 +19559,7 @@ pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] @@ -19578,7 +19578,7 @@ pub unsafe fn vld2q_f16(a: *const f16) -> float16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -19602,7 +19602,7 @@ pub unsafe fn vld2_f16(a: *const f16) -> float16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -19626,7 +19626,7 @@ pub unsafe fn vld2q_f16(a: *const f16) -> float16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -19642,7 +19642,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -19658,7 +19658,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -19674,7 +19674,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -19690,7 +19690,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -19706,7 +19706,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -19722,7 +19722,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -19738,7 +19738,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -19754,7 +19754,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_f32)"] #[doc = "## Safety"] #[doc = " * Neon 
intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19773,7 +19773,7 @@ pub unsafe fn vld2_f32(a: *const f32) -> float32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19792,7 +19792,7 @@ pub unsafe fn vld2q_f32(a: *const f32) -> float32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19811,7 +19811,7 @@ pub unsafe fn vld2_s8(a: *const i8) -> int8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19830,7 +19830,7 @@ pub unsafe fn vld2q_s8(a: *const i8) -> int8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19849,7 +19849,7 @@ pub unsafe fn vld2_s16(a: *const i16) -> int16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s16)"] #[doc = "## Safety"] 
#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19868,7 +19868,7 @@ pub unsafe fn vld2q_s16(a: *const i16) -> int16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19887,7 +19887,7 @@ pub unsafe fn vld2_s32(a: *const i32) -> int32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -19906,7 +19906,7 @@ pub unsafe fn vld2q_s32(a: *const i32) -> int32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -19932,7 +19932,7 @@ pub unsafe fn vld2_lane_f16(a: *const f16, b: float16x4x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -19958,7 +19958,7 @@ pub unsafe fn vld2q_lane_f16(a: *const f16, b: float16x8x2_t) - #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -19985,7 +19985,7 @@ pub unsafe fn vld2_lane_f16(a: *const f16, b: float16x4x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -20016,7 +20016,7 @@ pub unsafe fn vld2q_lane_f16(a: *const f16, b: float16x8x2_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] @@ -20037,7 +20037,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: float32x2x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] @@ -20059,7 +20059,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] @@ -20080,7 +20080,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] @@ -20101,7 +20101,7 @@ pub unsafe fn vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] @@ -20122,7 +20122,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] @@ -20143,7 +20143,7 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld2, LANE = 0))] @@ -20164,7 +20164,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] @@ -20188,7 +20188,7 @@ pub unsafe fn vld2_lane_f32(a: *const f32, b: 
float32x2x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] @@ -20212,7 +20212,7 @@ pub unsafe fn vld2q_lane_f32(a: *const f32, b: float32x4x2_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] @@ -20236,7 +20236,7 @@ pub unsafe fn vld2q_lane_s16(a: *const i16, b: int16x8x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] @@ -20260,7 +20260,7 @@ pub unsafe fn vld2q_lane_s32(a: *const i32, b: int32x4x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] @@ -20279,7 +20279,7 @@ pub unsafe fn vld2_lane_s8(a: *const i8, b: int8x8x2_t) -> int8 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] @@ -20303,7 +20303,7 @@ pub unsafe fn 
vld2_lane_s16(a: *const i16, b: int16x4x2_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld2, LANE = 0))] @@ -20327,7 +20327,7 @@ pub unsafe fn vld2_lane_s32(a: *const i32, b: int32x2x2_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -20352,7 +20352,7 @@ pub unsafe fn vld2_lane_u8(a: *const u8, b: uint8x8x2_t) -> uin #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -20377,7 +20377,7 @@ pub unsafe fn vld2_lane_u16(a: *const u16, b: uint16x4x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -20402,7 +20402,7 @@ pub unsafe fn vld2q_lane_u16(a: *const u16, b: uint16x8x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon 
intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -20427,7 +20427,7 @@ pub unsafe fn vld2_lane_u32(a: *const u32, b: uint32x2x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -20452,7 +20452,7 @@ pub unsafe fn vld2q_lane_u32(a: *const u32, b: uint32x4x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -20477,7 +20477,7 @@ pub unsafe fn vld2_lane_p8(a: *const p8, b: poly8x8x2_t) -> pol #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -20502,7 +20502,7 @@ pub unsafe fn vld2_lane_p16(a: *const p16, b: poly16x4x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2, LANE = 0))] @@ -20527,7 +20527,7 @@ pub unsafe fn vld2q_lane_p16(a: *const p16, b: poly16x8x2_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -20550,7 +20550,7 @@ pub unsafe fn vld2_p64(a: *const p64) -> poly64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -20566,7 +20566,7 @@ pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20585,7 +20585,7 @@ pub unsafe fn vld2_s64(a: *const i64) -> int64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -20608,7 +20608,7 @@ pub unsafe fn vld2_u64(a: *const u64) -> uint64x1x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u8)"] #[doc = "## 
Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20631,7 +20631,7 @@ pub unsafe fn vld2_u8(a: *const u8) -> uint8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20654,7 +20654,7 @@ pub unsafe fn vld2q_u8(a: *const u8) -> uint8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20677,7 +20677,7 @@ pub unsafe fn vld2_u16(a: *const u16) -> uint16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20700,7 +20700,7 @@ pub unsafe fn vld2q_u16(a: *const u16) -> uint16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] 
@@ -20723,7 +20723,7 @@ pub unsafe fn vld2_u32(a: *const u32) -> uint32x2x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20746,7 +20746,7 @@ pub unsafe fn vld2q_u32(a: *const u32) -> uint32x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20769,7 +20769,7 @@ pub unsafe fn vld2_p8(a: *const p8) -> poly8x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20792,7 +20792,7 @@ pub unsafe fn vld2q_p8(a: *const p8) -> poly8x16x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20815,7 +20815,7 @@ pub unsafe fn vld2_p16(a: *const p16) -> poly16x4x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon 
intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld2))] @@ -20838,7 +20838,7 @@ pub unsafe fn vld2q_p16(a: *const p16) -> poly16x8x2_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] @@ -20857,7 +20857,7 @@ pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] @@ -20876,7 +20876,7 @@ pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -20900,7 +20900,7 @@ pub unsafe fn vld3_dup_f16(a: *const f16) -> float16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -20924,7 +20924,7 @@ pub unsafe fn vld3q_dup_f16(a: *const f16) -> float16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] #[doc = "## Safety"] 
#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20943,7 +20943,7 @@ pub unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20962,7 +20962,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -20981,7 +20981,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21000,7 +21000,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21019,7 +21019,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21038,7 +21038,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21057,7 +21057,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21076,7 +21076,7 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -21095,7 +21095,7 @@ pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -21111,7 +21111,7 @@ pub 
unsafe fn vld3_dup_f32(a: *const f32) -> float32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -21127,7 +21127,7 @@ pub unsafe fn vld3q_dup_f32(a: *const f32) -> float32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -21143,7 +21143,7 @@ pub unsafe fn vld3_dup_s8(a: *const i8) -> int8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -21159,7 +21159,7 @@ pub unsafe fn vld3q_dup_s8(a: *const i8) -> int8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -21175,7 +21175,7 @@ pub unsafe fn vld3_dup_s16(a: *const i16) -> int16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] 
#[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -21191,7 +21191,7 @@ pub unsafe fn vld3q_dup_s16(a: *const i16) -> int16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -21207,7 +21207,7 @@ pub unsafe fn vld3_dup_s32(a: *const i32) -> int32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -21223,7 +21223,7 @@ pub unsafe fn vld3q_dup_s32(a: *const i32) -> int32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -21246,7 +21246,7 @@ pub unsafe fn vld3_dup_p64(a: *const p64) -> poly64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -21262,7 +21262,7 @@ pub unsafe fn vld3_dup_s64(a: *const i64) -> int64x1x3_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -21285,7 +21285,7 @@ pub unsafe fn vld3_dup_u64(a: *const u64) -> uint64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21309,7 +21309,7 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21337,7 +21337,7 @@ pub unsafe fn vld3_dup_u8(a: *const u8) -> uint8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21361,7 +21361,7 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] @@ -21401,7 +21401,7 @@ pub unsafe fn vld3q_dup_u8(a: *const u8) -> uint8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21425,7 +21425,7 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21453,7 +21453,7 @@ pub unsafe fn vld3_dup_u16(a: *const u16) -> uint16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21477,7 +21477,7 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21505,7 +21505,7 @@ pub unsafe fn vld3q_dup_u16(a: *const u16) -> uint16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21529,7 +21529,7 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21557,7 +21557,7 @@ pub unsafe fn vld3_dup_u32(a: *const u32) -> uint32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21581,7 +21581,7 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21609,7 +21609,7 @@ pub unsafe fn vld3q_dup_u32(a: *const u32) -> uint32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21633,7 +21633,7 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p8)"] #[doc = "## Safety"] #[doc = " * 
Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21661,7 +21661,7 @@ pub unsafe fn vld3_dup_p8(a: *const p8) -> poly8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21685,7 +21685,7 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21725,7 +21725,7 @@ pub unsafe fn vld3q_dup_p8(a: *const p8) -> poly8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21749,7 +21749,7 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21777,7 +21777,7 @@ pub unsafe fn vld3_dup_p16(a: *const p16) -> poly16x4x3_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21801,7 +21801,7 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -21829,7 +21829,7 @@ pub unsafe fn vld3q_dup_p16(a: *const p16) -> poly16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] @@ -21848,7 +21848,7 @@ pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] @@ -21867,7 +21867,7 @@ pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -21884,7 +21884,7 @@ pub unsafe fn vld3_f16(a: *const f16) -> float16x4x3_t { #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -21901,7 +21901,7 @@ pub unsafe fn vld3q_f16(a: *const f16) -> float16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg(not(target_arch = "arm"))] @@ -21913,7 +21913,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> float32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg(not(target_arch = "arm"))] @@ -21925,7 +21925,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg(not(target_arch = "arm"))] @@ -21937,7 +21937,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg(not(target_arch = "arm"))] @@ -21949,7 +21949,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg(not(target_arch = "arm"))] @@ -21961,7 +21961,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg(not(target_arch = "arm"))] @@ -21973,7 +21973,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg(not(target_arch = "arm"))] @@ -21985,7 +21985,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg(not(target_arch = "arm"))] @@ -21997,7 +21997,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -22013,7 +22013,7 @@ pub unsafe fn vld3_f32(a: *const f32) -> 
float32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -22029,7 +22029,7 @@ pub unsafe fn vld3q_f32(a: *const f32) -> float32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -22045,7 +22045,7 @@ pub unsafe fn vld3_s8(a: *const i8) -> int8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -22061,7 +22061,7 @@ pub unsafe fn vld3q_s8(a: *const i8) -> int8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -22077,7 +22077,7 @@ pub unsafe fn vld3_s16(a: *const i16) -> int16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800")] @@ -22093,7 +22093,7 @@ pub unsafe fn vld3q_s16(a: *const i16) -> int16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -22109,7 +22109,7 @@ pub unsafe fn vld3_s32(a: *const i32) -> int32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -22125,7 +22125,7 @@ pub unsafe fn vld3q_s32(a: *const i32) -> int32x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22152,7 +22152,7 @@ pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22179,7 +22179,7 @@ pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -22211,7 +22211,7 @@ pub unsafe fn vld3_lane_f16(a: *const f16, b: float16x4x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr( @@ -22243,7 +22243,7 @@ pub unsafe fn vld3q_lane_f16(a: *const f16, b: float16x8x3_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] @@ -22270,7 +22270,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] @@ -22297,7 +22297,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] @@ -22322,7 +22322,7 @@ pub unsafe fn vld3_lane_f32(a: *const f32, b: float32x2x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] @@ -22349,7 +22349,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] @@ -22376,7 +22376,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] @@ -22403,7 +22403,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] @@ -22430,7 +22430,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld3, LANE = 0))] @@ -22457,7 +22457,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] @@ -22482,7 +22482,7 @@ pub unsafe fn vld3_lane_s8(a: *const i8, b: int8x8x3_t) -> int8 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] @@ -22507,7 +22507,7 @@ pub unsafe fn vld3_lane_s16(a: *const i16, b: int16x4x3_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] @@ -22532,7 +22532,7 @@ pub unsafe fn vld3q_lane_s16(a: *const i16, b: int16x8x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] @@ -22557,7 +22557,7 @@ pub unsafe fn vld3_lane_s32(a: *const i32, b: int32x2x3_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] @@ -22582,7 +22582,7 @@ pub unsafe fn vld3q_lane_s32(a: *const i32, b: int32x4x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22607,7 +22607,7 @@ pub unsafe fn vld3_lane_u8(a: *const u8, b: uint8x8x3_t) -> uin #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22632,7 +22632,7 @@ pub unsafe fn vld3_lane_u16(a: *const u16, b: uint16x4x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22657,7 +22657,7 @@ pub unsafe fn vld3q_lane_u16(a: *const u16, b: uint16x8x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22682,7 +22682,7 @@ pub unsafe fn vld3_lane_u32(a: *const u32, b: uint32x2x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22707,7 +22707,7 @@ pub unsafe fn vld3q_lane_u32(a: *const u32, b: uint32x4x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22732,7 +22732,7 @@ pub unsafe fn vld3_lane_p8(a: *const p8, b: poly8x8x3_t) -> pol #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22757,7 +22757,7 @@ pub unsafe fn vld3_lane_p16(a: *const p16, b: poly16x4x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3, LANE = 0))] @@ -22782,7 +22782,7 @@ pub unsafe fn vld3q_lane_p16(a: *const p16, b: poly16x8x3_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -22805,7 +22805,7 @@ pub unsafe fn vld3_p64(a: *const p64) -> poly64x1x3_t { 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[stable(feature = "neon_intrinsics", since = "1.59.0")] #[cfg(not(target_arch = "arm"))] @@ -22817,7 +22817,7 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -22833,7 +22833,7 @@ pub unsafe fn vld3_s64(a: *const i64) -> int64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -22856,7 +22856,7 @@ pub unsafe fn vld3_u64(a: *const u64) -> uint64x1x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -22879,7 +22879,7 @@ pub unsafe fn vld3_u8(a: *const u8) -> uint8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -22902,7 +22902,7 @@ pub unsafe fn vld3q_u8(a: *const u8) -> uint8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -22925,7 +22925,7 @@ pub unsafe fn vld3_u16(a: *const u16) -> uint16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -22948,7 +22948,7 @@ pub unsafe fn vld3q_u16(a: *const u16) -> uint16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -22971,7 +22971,7 @@ pub unsafe fn vld3_u32(a: *const u32) -> uint32x2x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -22994,7 +22994,7 @@ pub unsafe fn vld3q_u32(a: *const u32) -> uint32x4x3_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23017,7 +23017,7 @@ pub unsafe fn vld3_p8(a: *const p8) -> poly8x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23040,7 +23040,7 @@ pub unsafe fn vld3q_p8(a: *const p8) -> poly8x16x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23063,7 +23063,7 @@ pub unsafe fn vld3_p16(a: *const p16) -> poly16x4x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld3))] @@ -23086,7 +23086,7 @@ pub unsafe fn vld3q_p16(a: *const p16) -> poly16x8x3_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] 
#[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld3, LANE = 0))] @@ -23111,7 +23111,7 @@ pub unsafe fn vld3q_lane_f32(a: *const f32, b: float32x4x3_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -23129,7 +23129,7 @@ pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -23147,7 +23147,7 @@ pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -23170,7 +23170,7 @@ pub unsafe fn vld4_dup_f16(a: *const f16) -> float16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -23193,7 +23193,7 @@ pub unsafe fn vld4q_dup_f16(a: *const f16) -> float16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] 
#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] @@ -23209,7 +23209,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] @@ -23225,7 +23225,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] @@ -23241,7 +23241,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] @@ -23257,7 +23257,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] @@ -23273,7 +23273,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] @@ -23289,7 +23289,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] @@ -23305,7 +23305,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vld4))] @@ -23321,7 +23321,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4r))] @@ -23340,7 +23340,7 @@ pub unsafe fn vld4_dup_f32(a: *const f32) -> float32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4r))] @@ -23359,7 +23359,7 @@ pub unsafe fn vld4q_dup_f32(a: *const f32) -> float32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4r))] @@ -23378,7 +23378,7 @@ pub unsafe fn vld4_dup_s8(a: *const i8) -> int8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4r))] @@ -23397,7 +23397,7 @@ pub unsafe fn vld4q_dup_s8(a: *const i8) -> int8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4r))] @@ -23416,7 +23416,7 @@ pub unsafe fn vld4_dup_s16(a: *const i16) -> int16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4r))] @@ -23435,7 +23435,7 @@ pub unsafe fn vld4q_dup_s16(a: *const i16) -> int16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4r))] @@ -23454,7 +23454,7 @@ pub unsafe fn vld4_dup_s32(a: *const i32) -> int32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4r))] @@ -23473,7 +23473,7 @@ pub unsafe fn vld4q_dup_s32(a: *const i32) -> int32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4r))] @@ -23492,7 +23492,7 @@ pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -23515,7 +23515,7 @@ pub unsafe fn vld4_dup_p64(a: *const p64) -> poly64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(nop))] @@ -23531,7 +23531,7 @@ pub unsafe fn vld4_dup_s64(a: *const i64) -> int64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -23554,7 +23554,7 @@ pub unsafe fn vld4_dup_u64(a: *const u64) -> uint64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic 
unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23578,7 +23578,7 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23607,7 +23607,7 @@ pub unsafe fn vld4_dup_u8(a: *const u8) -> uint8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23631,7 +23631,7 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23676,7 +23676,7 @@ pub unsafe fn vld4q_dup_u8(a: *const u8) -> uint8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23700,7 +23700,7 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23729,7 +23729,7 @@ pub unsafe fn vld4_dup_u16(a: *const u16) -> uint16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23753,7 +23753,7 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23782,7 +23782,7 @@ pub unsafe fn vld4q_dup_u16(a: *const u16) -> uint16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23806,7 +23806,7 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23835,7 
+23835,7 @@ pub unsafe fn vld4_dup_u32(a: *const u32) -> uint32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23859,7 +23859,7 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23888,7 +23888,7 @@ pub unsafe fn vld4q_dup_u32(a: *const u32) -> uint32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23912,7 +23912,7 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23941,7 +23941,7 @@ pub unsafe fn vld4_dup_p8(a: *const p8) -> poly8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -23965,7 +23965,7 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -24010,7 +24010,7 @@ pub unsafe fn vld4q_dup_p8(a: *const p8) -> poly8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -24034,7 +24034,7 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -24063,7 +24063,7 @@ pub unsafe fn vld4_dup_p16(a: *const p16) -> poly16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -24087,7 +24087,7 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_dup_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -24116,7 +24116,7 @@ pub unsafe fn vld4q_dup_p16(a: *const p16) -> poly16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -24134,7 +24134,7 @@ pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -24152,7 +24152,7 @@ pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -24168,7 +24168,7 @@ pub unsafe fn vld4_f16(a: *const f16) -> float16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -24184,7 +24184,7 @@ pub unsafe fn vld4q_f16(a: *const f16) -> float16x8x4_t { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24196,7 +24196,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24208,7 +24208,7 @@ pub unsafe fn vld4q_f32(a: *const f32) -> float32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24220,7 +24220,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24232,7 +24232,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24244,7 +24244,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24256,7 +24256,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24268,7 +24268,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -24280,7 +24280,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -24296,7 +24296,7 @@ pub unsafe fn vld4_f32(a: *const f32) -> float32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -24312,7 +24312,7 @@ pub unsafe fn 
vld4q_f32(a: *const f32) -> float32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -24328,7 +24328,7 @@ pub unsafe fn vld4_s8(a: *const i8) -> int8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -24344,7 +24344,7 @@ pub unsafe fn vld4q_s8(a: *const i8) -> int8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -24360,7 +24360,7 @@ pub unsafe fn vld4_s16(a: *const i16) -> int16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -24376,7 +24376,7 @@ pub unsafe fn vld4q_s16(a: *const i16) -> int16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] @@ -24392,7 +24392,7 @@ pub unsafe fn vld4_s32(a: *const i32) -> int32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -24408,7 +24408,7 @@ pub unsafe fn vld4q_s32(a: *const i32) -> int32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -24436,7 +24436,7 @@ pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -24464,7 +24464,7 @@ pub unsafe fn vld4q_lane_f16(a: *const f16, b: float16x8x4_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -24496,7 +24496,7 @@ pub unsafe fn vld4_lane_f16(a: *const f16, b: float16x4x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr( all(test, any(target_arch = "aarch64", target_arch = "arm64ec")), @@ -24528,7 +24528,7 @@ pub unsafe fn vld4q_lane_f16(a: *const f16, b: float16x8x4_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] @@ -24556,7 +24556,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] @@ -24584,7 +24584,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] @@ -24612,7 +24612,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] @@ -24640,7 +24640,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] #[doc = 
" * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] @@ -24668,7 +24668,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] @@ -24696,7 +24696,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(ld4, LANE = 0))] @@ -24724,7 +24724,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] @@ -24750,7 +24750,7 @@ pub unsafe fn vld4_lane_f32(a: *const f32, b: float32x2x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] @@ -24776,7 +24776,7 @@ pub unsafe fn vld4q_lane_f32(a: *const f32, b: float32x4x4_t) - #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s8)"] 
#[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] @@ -24802,7 +24802,7 @@ pub unsafe fn vld4_lane_s8(a: *const i8, b: int8x8x4_t) -> int8 #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] @@ -24828,7 +24828,7 @@ pub unsafe fn vld4_lane_s16(a: *const i16, b: int16x4x4_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] @@ -24854,7 +24854,7 @@ pub unsafe fn vld4q_lane_s16(a: *const i16, b: int16x8x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] @@ -24880,7 +24880,7 @@ pub unsafe fn vld4_lane_s32(a: *const i32, b: int32x2x4_t) -> i #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[cfg_attr(test, assert_instr(vld4, LANE = 0))] @@ -24906,7 +24906,7 @@ pub unsafe fn vld4q_lane_s32(a: *const i32, b: int32x4x4_t) -> #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -24931,7 +24931,7 @@ pub unsafe fn vld4_lane_u8(a: *const u8, b: uint8x8x4_t) -> uin #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -24956,7 +24956,7 @@ pub unsafe fn vld4_lane_u16(a: *const u16, b: uint16x4x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -24981,7 +24981,7 @@ pub unsafe fn vld4q_lane_u16(a: *const u16, b: uint16x8x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -25006,7 +25006,7 @@ pub unsafe fn vld4_lane_u32(a: *const u32, b: uint32x2x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon 
intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -25031,7 +25031,7 @@ pub unsafe fn vld4q_lane_u32(a: *const u32, b: uint32x4x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -25056,7 +25056,7 @@ pub unsafe fn vld4_lane_p8(a: *const p8, b: poly8x8x4_t) -> pol #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -25081,7 +25081,7 @@ pub unsafe fn vld4_lane_p16(a: *const p16, b: poly16x4x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4, LANE = 0))] @@ -25106,7 +25106,7 @@ pub unsafe fn vld4q_lane_p16(a: *const p16, b: poly16x8x4_t) -> #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = 
"neon,aes")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -25129,7 +25129,7 @@ pub unsafe fn vld4_p64(a: *const p64) -> poly64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -25141,7 +25141,7 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -25157,7 +25157,7 @@ pub unsafe fn vld4_s64(a: *const i64) -> int64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -25180,7 +25180,7 @@ pub unsafe fn vld4_u64(a: *const u64) -> uint64x1x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25203,7 +25203,7 @@ pub unsafe fn vld4_u8(a: *const u8) -> uint8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon 
intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25226,7 +25226,7 @@ pub unsafe fn vld4q_u8(a: *const u8) -> uint8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25249,7 +25249,7 @@ pub unsafe fn vld4_u16(a: *const u16) -> uint16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25272,7 +25272,7 @@ pub unsafe fn vld4q_u16(a: *const u16) -> uint16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25295,7 +25295,7 @@ pub unsafe fn vld4_u32(a: *const u32) -> uint32x2x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25318,7 +25318,7 
@@ pub unsafe fn vld4q_u32(a: *const u32) -> uint32x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25341,7 +25341,7 @@ pub unsafe fn vld4_p8(a: *const p8) -> poly8x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25364,7 +25364,7 @@ pub unsafe fn vld4q_p8(a: *const p8) -> poly8x16x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25387,7 +25387,7 @@ pub unsafe fn vld4_p16(a: *const p16) -> poly16x4x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vld4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vld4))] @@ -25410,7 +25410,7 @@ pub unsafe fn vld4q_p16(a: *const p16) -> poly16x8x4_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vldrq_p128)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -25431,7 +25431,7 @@ pub unsafe fn vldrq_p128(a: *const p128) -> p128 { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( @@ -25461,7 +25461,7 @@ pub fn vmax_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] #[cfg_attr( @@ -25491,7 +25491,7 @@ pub fn vmaxq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25520,7 +25520,7 @@ pub fn vmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25549,7 +25549,7 @@ pub fn vmaxq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25573,7 +25573,7 @@ pub fn vmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25597,7 +25597,7 @@ pub fn vmaxq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25621,7 +25621,7 @@ pub fn vmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25645,7 +25645,7 @@ pub fn vmaxq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25669,7 +25669,7 @@ pub fn vmax_s32(a: 
int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25693,7 +25693,7 @@ pub fn vmaxq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25717,7 +25717,7 @@ pub fn vmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25741,7 +25741,7 @@ pub fn vmaxq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25765,7 +25765,7 @@ pub fn vmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25789,7 +25789,7 @@ pub fn vmaxq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmax_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25813,7 +25813,7 @@ pub fn vmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Maximum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmax))] @@ -25837,7 +25837,7 @@ pub fn vmaxq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( @@ -25867,7 +25867,7 @@ pub fn vmaxnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] #[cfg_attr( @@ -25897,7 +25897,7 @@ pub fn vmaxnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnm_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] @@ -25926,7 +25926,7 @@ pub fn vmaxnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Floating-point Maximum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmaxnmq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmaxnm))] @@ -25955,7 +25955,7 @@ pub fn vmaxnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( @@ -25985,7 +25985,7 @@ pub fn vmin_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] #[cfg_attr( @@ -26015,7 +26015,7 @@ pub fn vminq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ 
-26044,7 +26044,7 @@ pub fn vmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26073,7 +26073,7 @@ pub fn vminq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26097,7 +26097,7 @@ pub fn vmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26121,7 +26121,7 @@ pub fn vminq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26145,7 +26145,7 @@ pub fn vmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26169,7 +26169,7 @@ pub fn vminq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26193,7 +26193,7 @@ pub fn vmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26217,7 +26217,7 @@ pub fn vminq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26241,7 +26241,7 @@ pub fn vmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26265,7 +26265,7 @@ pub fn vminq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u16)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26289,7 +26289,7 @@ pub fn vmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26313,7 +26313,7 @@ pub fn vminq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmin_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26337,7 +26337,7 @@ pub fn vmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Minimum (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmin))] @@ -26361,7 +26361,7 @@ pub fn vminq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( @@ -26391,7 +26391,7 @@ pub fn vminnm_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = 
"Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] #[cfg_attr( @@ -26421,7 +26421,7 @@ pub fn vminnmq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnm_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] @@ -26450,7 +26450,7 @@ pub fn vminnm_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Floating-point Minimum Number (vector)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vminnmq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vminnm))] @@ -26479,7 +26479,7 @@ pub fn vminnmq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] @@ -26500,7 +26500,7 @@ pub fn vmla_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { } #[doc = "Floating-point multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_f32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] @@ -26521,7 +26521,7 @@ pub fn vmlaq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] @@ -26548,7 +26548,7 @@ pub fn vmla_lane_f32( } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] @@ -26575,7 +26575,7 @@ pub fn vmla_laneq_f32( } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] @@ -26608,7 +26608,7 @@ pub fn vmlaq_lane_f32( } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32", LANE = 1))] @@ -26641,7 +26641,7 @@ pub fn 
vmlaq_laneq_f32( } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] @@ -26670,7 +26670,7 @@ pub fn vmla_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] @@ -26699,7 +26699,7 @@ pub fn vmla_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_ } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] @@ -26728,7 +26728,7 @@ pub fn vmla_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] @@ -26757,7 +26757,7 @@ pub fn vmla_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8 } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] @@ -26799,7 +26799,7 @@ pub fn vmlaq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] @@ -26841,7 +26841,7 @@ pub fn vmlaq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4 } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] @@ -26883,7 +26883,7 @@ pub fn vmlaq_laneq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16", LANE = 1))] @@ -26925,7 +26925,7 @@ pub fn vmlaq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_s32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] @@ -26948,7 +26948,7 @@ pub fn vmla_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] @@ -26971,7 +26971,7 @@ pub fn vmla_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_ } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] @@ -26994,7 +26994,7 @@ pub fn vmla_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] @@ -27017,7 +27017,7 @@ pub fn vmla_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4 } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr("vmla.i32", LANE = 1))] @@ -27046,7 +27046,7 @@ pub fn vmlaq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] @@ -27075,7 +27075,7 @@ pub fn vmlaq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2 } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] @@ -27104,7 +27104,7 @@ pub fn vmlaq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32", LANE = 1))] @@ -27133,7 +27133,7 @@ pub fn vmlaq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] @@ -27154,7 +27154,7 @@ pub fn vmla_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { } 
#[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.f32"))] @@ -27175,7 +27175,7 @@ pub fn vmlaq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] @@ -27196,7 +27196,7 @@ pub fn vmla_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] @@ -27217,7 +27217,7 @@ pub fn vmlaq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] @@ -27238,7 +27238,7 @@ pub fn vmla_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] @@ -27259,7 +27259,7 @@ pub fn vmlaq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] @@ -27280,7 +27280,7 @@ pub fn vmla_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] @@ -27301,7 +27301,7 @@ pub fn vmlaq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] @@ -27322,7 +27322,7 @@ pub fn vmla_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { } #[doc = "Vector multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] @@ -27343,7 +27343,7 @@ pub fn vmlaq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] @@ -27364,7 +27364,7 @@ pub fn vmla_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] @@ -27385,7 +27385,7 @@ pub fn vmlaq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] @@ -27406,7 +27406,7 @@ pub fn vmla_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] @@ -27427,7 +27427,7 @@ pub fn vmlaq_s16(a: int16x8_t, b: int16x8_t, c: 
int16x8_t) -> int16x8_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] @@ -27448,7 +27448,7 @@ pub fn vmla_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] @@ -27469,7 +27469,7 @@ pub fn vmlaq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] @@ -27490,7 +27490,7 @@ pub fn vmla_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i8"))] @@ -27511,7 +27511,7 @@ pub fn vmlaq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u16)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] @@ -27532,7 +27532,7 @@ pub fn vmla_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i16"))] @@ -27553,7 +27553,7 @@ pub fn vmlaq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmla_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] @@ -27574,7 +27574,7 @@ pub fn vmla_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { } #[doc = "Multiply-add to accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlaq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmla.i32"))] @@ -27595,7 +27595,7 @@ pub fn vmlaq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmlal.s16", LANE = 1))] @@ -27624,7 +27624,7 @@ pub fn vmlal_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16", LANE = 1))] @@ -27653,7 +27653,7 @@ pub fn vmlal_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] @@ -27676,7 +27676,7 @@ pub fn vmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32", LANE = 1))] @@ -27699,7 +27699,7 @@ pub fn vmlal_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] @@ -27728,7 +27728,7 @@ pub fn vmlal_lane_u16(a: 
uint32x4_t, b: uint16x4_t, c: uint16x4 } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16", LANE = 1))] @@ -27757,7 +27757,7 @@ pub fn vmlal_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] @@ -27780,7 +27780,7 @@ pub fn vmlal_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2 } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32", LANE = 1))] @@ -27803,7 +27803,7 @@ pub fn vmlal_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] @@ -27824,7 +27824,7 @@ pub fn vmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { } #[doc = "Vector widening multiply accumulate with scalar"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] @@ -27845,7 +27845,7 @@ pub fn vmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] @@ -27866,7 +27866,7 @@ pub fn vmlal_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { } #[doc = "Vector widening multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] @@ -27887,7 +27887,7 @@ pub fn vmlal_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { } #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s8"))] @@ -27908,7 +27908,7 @@ pub fn vmlal_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { } #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable 
= "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s16"))] @@ -27929,7 +27929,7 @@ pub fn vmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { } #[doc = "Signed multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.s32"))] @@ -27950,7 +27950,7 @@ pub fn vmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { } #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u8"))] @@ -27971,7 +27971,7 @@ pub fn vmlal_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { } #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u16"))] @@ -27992,7 +27992,7 @@ pub fn vmlal_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { } #[doc = "Unsigned multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlal_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlal.u32"))] @@ -28013,7 +28013,7 @@ pub fn vmlal_u32(a: 
uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { } #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] @@ -28034,7 +28034,7 @@ pub fn vmls_f32(a: float32x2_t, b: float32x2_t, c: float32x2_t) -> float32x2_t { } #[doc = "Floating-point multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] @@ -28055,7 +28055,7 @@ pub fn vmlsq_f32(a: float32x4_t, b: float32x4_t, c: float32x4_t) -> float32x4_t } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] @@ -28082,7 +28082,7 @@ pub fn vmls_lane_f32( } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] @@ -28109,7 +28109,7 @@ pub fn vmls_laneq_f32( } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] @@ -28142,7 +28142,7 @@ pub fn vmlsq_lane_f32( } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32", LANE = 1))] @@ -28175,7 +28175,7 @@ pub fn vmlsq_laneq_f32( } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] @@ -28204,7 +28204,7 @@ pub fn vmls_lane_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] @@ -28233,7 +28233,7 @@ pub fn vmls_lane_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_ } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] @@ -28262,7 +28262,7 @@ pub fn vmls_laneq_s16(a: int16x4_t, b: int16x4_t, c: int16x8_t) } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] @@ -28291,7 +28291,7 @@ pub fn vmls_laneq_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x8 } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] @@ -28333,7 +28333,7 @@ pub fn vmlsq_lane_s16(a: int16x8_t, b: int16x8_t, c: int16x4_t) } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] @@ -28375,7 +28375,7 @@ pub fn vmlsq_lane_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x4 } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] @@ -28417,7 +28417,7 @@ pub fn vmlsq_laneq_s16(a: 
int16x8_t, b: int16x8_t, c: int16x8_t } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16", LANE = 1))] @@ -28459,7 +28459,7 @@ pub fn vmlsq_laneq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] @@ -28482,7 +28482,7 @@ pub fn vmls_lane_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] @@ -28505,7 +28505,7 @@ pub fn vmls_lane_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_ } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] @@ -28528,7 +28528,7 @@ pub fn vmls_laneq_s32(a: int32x2_t, b: int32x2_t, c: int32x4_t) } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] @@ -28551,7 +28551,7 @@ pub fn vmls_laneq_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x4 } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] @@ -28580,7 +28580,7 @@ pub fn vmlsq_lane_s32(a: int32x4_t, b: int32x4_t, c: int32x2_t) } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] @@ -28609,7 +28609,7 @@ pub fn vmlsq_lane_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x2 } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] @@ -28638,7 +28638,7 @@ pub fn vmlsq_laneq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_laneq_u32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32", LANE = 1))] @@ -28667,7 +28667,7 @@ pub fn vmlsq_laneq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] @@ -28688,7 +28688,7 @@ pub fn vmls_n_f32(a: float32x2_t, b: float32x2_t, c: f32) -> float32x2_t { } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.f32"))] @@ -28709,7 +28709,7 @@ pub fn vmlsq_n_f32(a: float32x4_t, b: float32x4_t, c: f32) -> float32x4_t { } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] @@ -28730,7 +28730,7 @@ pub fn vmls_n_s16(a: int16x4_t, b: int16x4_t, c: i16) -> int16x4_t { } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vmls.i16"))] @@ -28751,7 +28751,7 @@ pub fn vmlsq_n_s16(a: int16x8_t, b: int16x8_t, c: i16) -> int16x8_t { } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] @@ -28772,7 +28772,7 @@ pub fn vmls_n_u16(a: uint16x4_t, b: uint16x4_t, c: u16) -> uint16x4_t { } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] @@ -28793,7 +28793,7 @@ pub fn vmlsq_n_u16(a: uint16x8_t, b: uint16x8_t, c: u16) -> uint16x8_t { } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] @@ -28814,7 +28814,7 @@ pub fn vmls_n_s32(a: int32x2_t, b: int32x2_t, c: i32) -> int32x2_t { } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] @@ -28835,7 +28835,7 @@ pub fn vmlsq_n_s32(a: int32x4_t, b: int32x4_t, c: i32) -> int32x4_t { } #[doc = "Vector multiply subtract with 
scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] @@ -28856,7 +28856,7 @@ pub fn vmls_n_u32(a: uint32x2_t, b: uint32x2_t, c: u32) -> uint32x2_t { } #[doc = "Vector multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] @@ -28877,7 +28877,7 @@ pub fn vmlsq_n_u32(a: uint32x4_t, b: uint32x4_t, c: u32) -> uint32x4_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] @@ -28898,7 +28898,7 @@ pub fn vmls_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] @@ -28919,7 +28919,7 @@ pub fn vmlsq_s8(a: int8x16_t, b: int8x16_t, c: int8x16_t) -> int8x16_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] @@ -28940,7 +28940,7 @@ pub fn vmls_s16(a: int16x4_t, b: int16x4_t, c: int16x4_t) -> int16x4_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] @@ -28961,7 +28961,7 @@ pub fn vmlsq_s16(a: int16x8_t, b: int16x8_t, c: int16x8_t) -> int16x8_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] @@ -28982,7 +28982,7 @@ pub fn vmls_s32(a: int32x2_t, b: int32x2_t, c: int32x2_t) -> int32x2_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] @@ -29003,7 +29003,7 @@ pub fn vmlsq_s32(a: int32x4_t, b: int32x4_t, c: int32x4_t) -> int32x4_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] @@ -29024,7 +29024,7 @@ pub fn 
vmls_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i8"))] @@ -29045,7 +29045,7 @@ pub fn vmlsq_u8(a: uint8x16_t, b: uint8x16_t, c: uint8x16_t) -> uint8x16_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] @@ -29066,7 +29066,7 @@ pub fn vmls_u16(a: uint16x4_t, b: uint16x4_t, c: uint16x4_t) -> uint16x4_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i16"))] @@ -29087,7 +29087,7 @@ pub fn vmlsq_u16(a: uint16x8_t, b: uint16x8_t, c: uint16x8_t) -> uint16x8_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmls_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] @@ -29108,7 +29108,7 @@ pub fn vmls_u32(a: uint32x2_t, b: uint32x2_t, c: uint32x2_t) -> uint32x2_t { } #[doc = "Multiply-subtract from accumulator"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmls.i32"))] @@ -29129,7 +29129,7 @@ pub fn vmlsq_u32(a: uint32x4_t, b: uint32x4_t, c: uint32x4_t) -> uint32x4_t { } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] @@ -29158,7 +29158,7 @@ pub fn vmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16", LANE = 1))] @@ -29187,7 +29187,7 @@ pub fn vmlsl_laneq_s16(a: int32x4_t, b: int16x4_t, c: int16x8_t } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] @@ -29210,7 +29210,7 @@ pub fn vmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_s32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32", LANE = 1))] @@ -29233,7 +29233,7 @@ pub fn vmlsl_laneq_s32(a: int64x2_t, b: int32x2_t, c: int32x4_t } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] @@ -29262,7 +29262,7 @@ pub fn vmlsl_lane_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4 } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16", LANE = 1))] @@ -29291,7 +29291,7 @@ pub fn vmlsl_laneq_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] @@ -29314,7 +29314,7 @@ pub fn vmlsl_lane_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2 } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32", LANE = 1))] @@ -29337,7 +29337,7 @@ pub fn vmlsl_laneq_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] @@ -29358,7 +29358,7 @@ pub fn vmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] @@ -29379,7 +29379,7 @@ pub fn vmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] @@ -29400,7 +29400,7 @@ pub fn vmlsl_n_u16(a: uint32x4_t, b: uint16x4_t, c: u16) -> uint32x4_t { } #[doc = "Vector widening multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] @@ -29421,7 +29421,7 
@@ pub fn vmlsl_n_u32(a: uint64x2_t, b: uint32x2_t, c: u32) -> uint64x2_t { } #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s8"))] @@ -29442,7 +29442,7 @@ pub fn vmlsl_s8(a: int16x8_t, b: int8x8_t, c: int8x8_t) -> int16x8_t { } #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s16"))] @@ -29463,7 +29463,7 @@ pub fn vmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { } #[doc = "Signed multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.s32"))] @@ -29484,7 +29484,7 @@ pub fn vmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { } #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u8"))] @@ -29505,7 +29505,7 @@ pub fn vmlsl_u8(a: uint16x8_t, b: uint8x8_t, c: uint8x8_t) -> uint16x8_t { } #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u16"))] @@ -29526,7 +29526,7 @@ pub fn vmlsl_u16(a: uint32x4_t, b: uint16x4_t, c: uint16x4_t) -> uint32x4_t { } #[doc = "Unsigned multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmlsl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmlsl.u32"))] @@ -29547,7 +29547,7 @@ pub fn vmlsl_u32(a: uint64x2_t, b: uint32x2_t, c: uint32x2_t) -> uint64x2_t { } #[doc = "8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -29576,7 +29576,7 @@ pub fn vmmlaq_s32(a: int32x4_t, b: int8x16_t, c: int8x16_t) -> int32x4_t { } #[doc = "8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmmlaq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -29605,7 +29605,7 @@ pub fn vmmlaq_u32(a: uint32x4_t, b: uint8x16_t, c: uint8x16_t) -> uint32x4_t { } #[doc = "Duplicate element to vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -29621,7 +29621,7 @@ pub fn vmov_n_f16(a: f16) -> float16x4_t { } #[doc = "Duplicate element to vector"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_f16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -29637,7 +29637,7 @@ pub fn vmovq_n_f16(a: f16) -> float16x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -29658,7 +29658,7 @@ pub fn vmov_n_f32(value: f32) -> float32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -29679,7 +29679,7 @@ pub fn vmov_n_p16(value: p16) -> poly16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -29700,7 +29700,7 @@ pub fn vmov_n_p8(value: p8) -> poly8x8_t { } #[doc = "Duplicate vector element to vector or 
scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -29721,7 +29721,7 @@ pub fn vmov_n_s16(value: i16) -> int16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -29742,7 +29742,7 @@ pub fn vmov_n_s32(value: i32) -> int32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -29763,7 +29763,7 @@ pub fn vmov_n_s64(value: i64) -> int64x1_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -29784,7 +29784,7 @@ pub fn vmov_n_s8(value: i8) -> int8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -29805,7 +29805,7 @@ pub fn vmov_n_u16(value: u16) -> uint16x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -29826,7 +29826,7 @@ pub fn vmov_n_u32(value: u32) -> uint32x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -29847,7 +29847,7 @@ pub fn vmov_n_u64(value: u64) -> uint64x1_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmov_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -29868,7 +29868,7 @@ pub fn vmov_n_u8(value: u8) -> uint8x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -29889,7 +29889,7 @@ pub fn vmovq_n_f32(value: f32) -> float32x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -29910,7 +29910,7 @@ pub fn vmovq_n_p16(value: p16) -> poly16x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -29931,7 +29931,7 @@ pub fn vmovq_n_p8(value: p8) -> poly8x16_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -29952,7 +29952,7 @@ pub fn vmovq_n_s16(value: i16) -> int16x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -29973,7 +29973,7 @@ pub fn vmovq_n_s32(value: i32) -> int32x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -29994,7 +29994,7 @@ pub fn vmovq_n_s64(value: i64) -> int64x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -30015,7 +30015,7 @@ pub fn vmovq_n_s8(value: i8) -> int8x16_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.16"))] @@ -30036,7 +30036,7 @@ pub fn vmovq_n_u16(value: u16) -> uint16x8_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.32"))] @@ -30057,7 +30057,7 @@ pub fn vmovq_n_u32(value: u32) -> uint32x4_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmov"))] @@ -30078,7 +30078,7 @@ pub fn vmovq_n_u64(value: u64) -> uint64x2_t { } #[doc = "Duplicate vector element to vector or scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vdup.8"))] @@ -30099,7 +30099,7 @@ pub fn vmovq_n_u8(value: u8) -> uint8x16_t { } #[doc = "Vector long move."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] @@ -30120,7 +30120,7 @@ pub fn vmovl_s16(a: int16x4_t) -> int32x4_t { } #[doc = "Vector long move."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] @@ -30141,7 +30141,7 @@ pub fn vmovl_s32(a: int32x2_t) -> int64x2_t { } #[doc = "Vector long move."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] @@ -30162,7 +30162,7 @@ pub fn vmovl_s8(a: int8x8_t) -> int16x8_t { } #[doc = "Vector long move."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] @@ -30183,7 +30183,7 @@ pub fn vmovl_u16(a: uint16x4_t) -> uint32x4_t { } #[doc = 
"Vector long move."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] @@ -30204,7 +30204,7 @@ pub fn vmovl_u32(a: uint32x2_t) -> uint64x2_t { } #[doc = "Vector long move."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovl))] @@ -30225,7 +30225,7 @@ pub fn vmovl_u8(a: uint8x8_t) -> uint16x8_t { } #[doc = "Vector narrow integer."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] @@ -30246,7 +30246,7 @@ pub fn vmovn_s16(a: int16x8_t) -> int8x8_t { } #[doc = "Vector narrow integer."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] @@ -30267,7 +30267,7 @@ pub fn vmovn_s32(a: int32x4_t) -> int16x4_t { } #[doc = "Vector narrow integer."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] @@ -30288,7 +30288,7 @@ pub 
fn vmovn_s64(a: int64x2_t) -> int32x2_t { } #[doc = "Vector narrow integer."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] @@ -30309,7 +30309,7 @@ pub fn vmovn_u16(a: uint16x8_t) -> uint8x8_t { } #[doc = "Vector narrow integer."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] @@ -30330,7 +30330,7 @@ pub fn vmovn_u32(a: uint32x4_t) -> uint16x4_t { } #[doc = "Vector narrow integer."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmovn_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmovn))] @@ -30351,7 +30351,7 @@ pub fn vmovn_u64(a: uint64x2_t) -> uint32x2_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f16"))] #[cfg_attr( @@ -30373,7 +30373,7 @@ pub fn vmul_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f16"))] #[cfg_attr( @@ 
-30395,7 +30395,7 @@ pub fn vmulq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] @@ -30416,7 +30416,7 @@ pub fn vmul_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.f32"))] @@ -30437,7 +30437,7 @@ pub fn vmulq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( @@ -30466,7 +30466,7 @@ pub fn vmul_lane_f16(a: float16x4_t, v: float16x4_t) -> float16 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] #[cfg_attr( @@ -30508,7 +30508,7 @@ pub fn vmulq_lane_f16(a: float16x8_t, v: float16x4_t) -> float1 } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] @@ -30531,7 +30531,7 @@ pub fn vmul_lane_f32(a: float32x2_t, b: float32x2_t) -> float32 } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] @@ -30554,7 +30554,7 @@ pub fn vmul_laneq_f32(a: float32x2_t, b: float32x4_t) -> float3 } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] @@ -30582,7 +30582,7 @@ pub fn vmulq_lane_f32(a: float32x4_t, b: float32x2_t) -> float3 } #[doc = "Floating-point multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 0))] @@ -30610,7 +30610,7 @@ pub fn vmulq_laneq_f32(a: float32x4_t, b: float32x4_t) -> float } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30638,7 +30638,7 @@ pub fn vmul_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Multiply"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30679,7 +30679,7 @@ pub fn vmulq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x8_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30702,7 +30702,7 @@ pub fn vmul_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30730,7 +30730,7 @@ pub fn vmulq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x4_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30758,7 +30758,7 @@ pub fn vmul_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_ } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vmul, LANE = 1))] @@ -30799,7 +30799,7 @@ pub fn vmulq_lane_u16(a: uint16x8_t, b: uint16x4_t) -> uint16x8 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30822,7 +30822,7 @@ pub fn vmul_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_ } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30850,7 +30850,7 @@ pub fn vmulq_lane_u32(a: uint32x4_t, b: uint32x2_t) -> uint32x4 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30878,7 +30878,7 @@ pub fn vmul_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30919,7 +30919,7 @@ pub fn vmulq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_s32)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30942,7 +30942,7 @@ pub fn vmul_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30970,7 +30970,7 @@ pub fn vmulq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -30998,7 +30998,7 @@ pub fn vmul_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint16x4 } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -31039,7 +31039,7 @@ pub fn vmulq_laneq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -31062,7 +31062,7 @@ pub fn vmul_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint32x2 } 
#[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul, LANE = 1))] @@ -31090,7 +31090,7 @@ pub fn vmulq_laneq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( @@ -31105,7 +31105,7 @@ pub fn vmul_n_f16(a: float16x4_t, b: f16) -> float16x4_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] #[cfg_attr( @@ -31120,7 +31120,7 @@ pub fn vmulq_n_f16(a: float16x8_t, b: f16) -> float16x8_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31141,7 +31141,7 @@ pub fn vmul_n_f32(a: float32x2_t, b: f32) -> float32x2_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vmul))] @@ -31162,7 +31162,7 @@ pub fn vmulq_n_f32(a: float32x4_t, b: f32) -> float32x4_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31183,7 +31183,7 @@ pub fn vmul_n_s16(a: int16x4_t, b: i16) -> int16x4_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31204,7 +31204,7 @@ pub fn vmulq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31225,7 +31225,7 @@ pub fn vmul_n_s32(a: int32x2_t, b: i32) -> int32x2_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31246,7 +31246,7 @@ pub fn vmulq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u16)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31267,7 +31267,7 @@ pub fn vmul_n_u16(a: uint16x4_t, b: u16) -> uint16x4_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31288,7 +31288,7 @@ pub fn vmulq_n_u16(a: uint16x8_t, b: u16) -> uint16x8_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31309,7 +31309,7 @@ pub fn vmul_n_u32(a: uint32x2_t, b: u32) -> uint32x2_t { } #[doc = "Vector multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31330,7 +31330,7 @@ pub fn vmulq_n_u32(a: uint32x4_t, b: u32) -> uint32x4_t { } #[doc = "Polynomial multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31359,7 +31359,7 @@ pub fn vmul_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Polynomial multiply"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmul))] @@ -31388,7 +31388,7 @@ pub fn vmulq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] @@ -31409,7 +31409,7 @@ pub fn vmul_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] @@ -31430,7 +31430,7 @@ pub fn vmulq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] @@ -31451,7 +31451,7 @@ pub fn vmul_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i16"))] @@ -31472,7 +31472,7 @@ pub fn 
vmulq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] @@ -31493,7 +31493,7 @@ pub fn vmul_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] @@ -31514,7 +31514,7 @@ pub fn vmulq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] @@ -31535,7 +31535,7 @@ pub fn vmul_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i32"))] @@ -31556,7 +31556,7 @@ pub fn vmulq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] @@ -31577,7 +31577,7 @@ pub fn vmul_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] @@ -31598,7 +31598,7 @@ pub fn vmulq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmul_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] @@ -31619,7 +31619,7 @@ pub fn vmul_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Multiply"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmulq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmul.i8"))] @@ -31640,7 +31640,7 @@ pub fn vmulq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] @@ -31668,7 +31668,7 @@ pub fn vmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t } #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] @@ -31696,7 +31696,7 @@ pub fn vmull_laneq_s16(a: int16x4_t, b: int16x8_t) -> int32x4_t } #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] @@ -31719,7 +31719,7 @@ pub fn vmull_lane_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t } #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] @@ -31742,7 +31742,7 @@ pub fn vmull_laneq_s32(a: int32x2_t, b: int32x4_t) -> int64x2_t } #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] @@ -31770,7 +31770,7 @@ pub fn vmull_lane_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4 } #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] @@ -31798,7 +31798,7 @@ pub fn vmull_laneq_u16(a: uint16x4_t, b: uint16x8_t) -> uint32x } #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] @@ -31821,7 +31821,7 @@ pub fn vmull_lane_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2 } #[doc = "Vector long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_laneq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull, LANE = 1))] @@ -31844,7 +31844,7 @@ pub fn vmull_laneq_u32(a: uint32x2_t, b: uint32x4_t) -> uint64x } #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] @@ -31865,7 +31865,7 @@ pub fn vmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { } #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] @@ -31886,7 +31886,7 @@ pub fn vmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t 
{ } #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] @@ -31907,7 +31907,7 @@ pub fn vmull_n_u16(a: uint16x4_t, b: u16) -> uint32x4_t { } #[doc = "Vector long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmull))] @@ -31928,7 +31928,7 @@ pub fn vmull_n_u32(a: uint32x2_t, b: u32) -> uint64x2_t { } #[doc = "Polynomial multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.p8"))] @@ -31957,7 +31957,7 @@ pub fn vmull_p8(a: poly8x8_t, b: poly8x8_t) -> poly16x8_t { } #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s16"))] @@ -31978,7 +31978,7 @@ pub fn vmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { } #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s32"))] @@ -31999,7 +31999,7 @@ pub fn vmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { } #[doc = "Signed multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.s8"))] @@ -32020,7 +32020,7 @@ pub fn vmull_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u8"))] @@ -32041,7 +32041,7 @@ pub fn vmull_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u16"))] @@ -32062,7 +32062,7 @@ pub fn vmull_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { } #[doc = "Unsigned multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmull_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vmull.u32"))] @@ -32083,7 +32083,7 @@ pub fn vmull_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32105,7 +32105,7 @@ pub fn vmvn_p8(a: poly8x8_t) -> poly8x8_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32127,7 +32127,7 @@ pub fn vmvn_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32149,7 +32149,7 @@ pub fn vmvn_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32171,7 +32171,7 @@ pub fn vmvn_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32193,7 +32193,7 @@ pub fn vmvn_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Vector 
bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32215,7 +32215,7 @@ pub fn vmvn_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvn_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32237,7 +32237,7 @@ pub fn vmvn_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32259,7 +32259,7 @@ pub fn vmvnq_p8(a: poly8x16_t) -> poly8x16_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32281,7 +32281,7 @@ pub fn vmvnq_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32303,7 +32303,7 @@ pub fn vmvnq_s32(a: 
int32x4_t) -> int32x4_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32325,7 +32325,7 @@ pub fn vmvnq_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32347,7 +32347,7 @@ pub fn vmvnq_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32369,7 +32369,7 @@ pub fn vmvnq_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Vector bitwise not."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vmvnq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vmvn))] @@ -32391,7 +32391,7 @@ pub fn vmvnq_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f16"))] #[cfg_attr( @@ -32413,7 +32413,7 @@ pub fn 
vneg_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f16"))] #[cfg_attr( @@ -32435,7 +32435,7 @@ pub fn vnegq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] @@ -32456,7 +32456,7 @@ pub fn vneg_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.f32"))] @@ -32477,7 +32477,7 @@ pub fn vnegq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] @@ -32498,7 +32498,7 @@ pub fn vneg_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s8"))] @@ -32519,7 +32519,7 @@ pub fn vnegq_s8(a: 
int8x16_t) -> int8x16_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] @@ -32540,7 +32540,7 @@ pub fn vneg_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s16"))] @@ -32561,7 +32561,7 @@ pub fn vnegq_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vneg_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] @@ -32582,7 +32582,7 @@ pub fn vneg_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vnegq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vneg.s32"))] @@ -32603,7 +32603,7 @@ pub fn vnegq_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32625,7 +32625,7 @@ 
pub fn vorn_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32647,7 +32647,7 @@ pub fn vorn_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32669,7 +32669,7 @@ pub fn vorn_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32691,7 +32691,7 @@ pub fn vorn_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32713,7 +32713,7 @@ pub fn vornq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32735,7 +32735,7 @@ pub fn vornq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32757,7 +32757,7 @@ pub fn vornq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32779,7 +32779,7 @@ pub fn vornq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32801,7 +32801,7 @@ pub fn vorn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32823,7 +32823,7 @@ pub fn vorn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Vector bitwise inclusive OR 
NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32845,7 +32845,7 @@ pub fn vorn_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorn_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32867,7 +32867,7 @@ pub fn vorn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32889,7 +32889,7 @@ pub fn vornq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32911,7 +32911,7 @@ pub fn vornq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32933,7 +32933,7 @@ pub fn vornq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Vector bitwise inclusive OR NOT"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vornq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorn))] @@ -32955,7 +32955,7 @@ pub fn vornq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -32976,7 +32976,7 @@ pub fn vorr_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -32997,7 +32997,7 @@ pub fn vorrq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33018,7 +33018,7 @@ pub fn vorr_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33039,7 +33039,7 @@ pub fn vorrq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33060,7 +33060,7 @@ pub fn vorr_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33081,7 +33081,7 @@ pub fn vorrq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33102,7 +33102,7 @@ pub fn vorr_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33123,7 +33123,7 @@ pub fn vorrq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33144,7 +33144,7 @@ pub fn vorr_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33165,7 +33165,7 @@ pub fn vorrq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33186,7 +33186,7 @@ pub fn vorr_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33207,7 +33207,7 @@ pub fn vorrq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33228,7 +33228,7 @@ pub fn vorr_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33249,7 +33249,7 @@ pub fn vorrq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorr_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33270,7 +33270,7 @@ pub fn vorr_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Vector bitwise or (immediate, inclusive)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vorrq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -33291,7 +33291,7 @@ pub fn vorrq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] @@ -33321,7 +33321,7 @@ pub fn vpadal_s8(a: int16x4_t, b: int8x8_t) -> int16x4_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s8"))] @@ -33351,7 +33351,7 @@ pub fn vpadalq_s8(a: int16x8_t, b: int8x16_t) -> int16x8_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] @@ -33381,7 +33381,7 @@ pub fn vpadal_s16(a: int32x2_t, b: int16x4_t) -> int32x2_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s16"))] @@ -33411,7 +33411,7 @@ pub fn vpadalq_s16(a: int32x4_t, b: int16x8_t) -> int32x4_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] @@ -33441,7 +33441,7 @@ pub fn vpadal_s32(a: int64x1_t, b: int32x2_t) -> int64x1_t { } #[doc = "Signed Add 
and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.s32"))] @@ -33471,7 +33471,7 @@ pub fn vpadalq_s32(a: int64x2_t, b: int32x4_t) -> int64x2_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] @@ -33501,7 +33501,7 @@ pub fn vpadal_u8(a: uint16x4_t, b: uint8x8_t) -> uint16x4_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u8"))] @@ -33531,7 +33531,7 @@ pub fn vpadalq_u8(a: uint16x8_t, b: uint8x16_t) -> uint16x8_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] @@ -33561,7 +33561,7 @@ pub fn vpadal_u16(a: uint32x2_t, b: uint16x4_t) -> uint32x2_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u16)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u16"))] @@ -33591,7 +33591,7 @@ pub fn vpadalq_u16(a: uint32x4_t, b: uint16x8_t) -> uint32x4_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadal_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] @@ -33621,7 +33621,7 @@ pub fn vpadal_u32(a: uint64x1_t, b: uint32x2_t) -> uint64x1_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadalq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpadal.u32"))] @@ -33651,7 +33651,7 @@ pub fn vpadalq_u32(a: uint64x2_t, b: uint32x4_t) -> uint64x2_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] #[cfg_attr( @@ -33681,7 +33681,7 @@ pub fn vpadd_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point add pairwise"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] @@ -33710,7 +33710,7 @@ pub fn vpadd_f32(a: 
float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] @@ -33739,7 +33739,7 @@ pub fn vpadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] @@ -33768,7 +33768,7 @@ pub fn vpadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] @@ -33797,7 +33797,7 @@ pub fn vpadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] @@ -33818,7 +33818,7 @@ pub fn vpadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vpadd))] @@ -33839,7 +33839,7 @@ pub fn vpadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Add pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpadd_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpadd))] @@ -33860,7 +33860,7 @@ pub fn vpadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] @@ -33889,7 +33889,7 @@ pub fn vpaddl_s8(a: int8x8_t) -> int16x4_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s8"))] @@ -33918,7 +33918,7 @@ pub fn vpaddlq_s8(a: int8x16_t) -> int16x8_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] @@ -33947,7 +33947,7 @@ pub fn vpaddl_s16(a: int16x4_t) -> int32x2_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s16"))] @@ -33976,7 +33976,7 @@ pub fn vpaddlq_s16(a: int16x8_t) -> int32x4_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] @@ -34005,7 +34005,7 @@ pub fn vpaddl_s32(a: int32x2_t) -> int64x1_t { } #[doc = "Signed Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.s32"))] @@ -34034,7 +34034,7 @@ pub fn vpaddlq_s32(a: int32x4_t) -> int64x2_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] @@ -34063,7 +34063,7 @@ pub fn vpaddl_u8(a: uint8x8_t) -> uint16x4_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u8"))] @@ -34092,7 +34092,7 @@ pub fn vpaddlq_u8(a: uint8x16_t) -> uint16x8_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] @@ -34121,7 +34121,7 @@ pub fn vpaddl_u16(a: uint16x4_t) -> uint32x2_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u16"))] @@ -34150,7 +34150,7 @@ pub fn vpaddlq_u16(a: uint16x8_t) -> uint32x4_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] @@ -34179,7 +34179,7 @@ pub fn vpaddl_u32(a: uint32x2_t) -> uint64x1_t { } #[doc = "Unsigned Add and Accumulate Long Pairwise."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpaddlq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vpaddl.u32"))] @@ -34208,7 +34208,7 @@ pub fn vpaddlq_u32(a: uint32x4_t) -> uint64x2_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] @@ -34237,7 +34237,7 @@ pub fn vpmax_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] @@ -34266,7 +34266,7 @@ pub fn vpmax_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] @@ -34295,7 +34295,7 @@ pub fn vpmax_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] @@ -34324,7 +34324,7 @@ pub fn vpmax_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vpmax))] @@ -34353,7 +34353,7 @@ pub fn vpmax_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] @@ -34382,7 +34382,7 @@ pub fn vpmax_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Folding maximum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmax_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmax))] @@ -34411,7 +34411,7 @@ pub fn vpmax_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] @@ -34440,7 +34440,7 @@ pub fn vpmin_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] @@ -34469,7 +34469,7 @@ pub fn vpmin_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] @@ -34498,7 +34498,7 @@ pub fn vpmin_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] @@ -34527,7 +34527,7 @@ pub fn vpmin_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] @@ -34556,7 +34556,7 @@ pub fn vpmin_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vpmin))] @@ -34585,7 +34585,7 @@ pub fn vpmin_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Folding minimum of adjacent pairs"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vpmin_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vpmin))] @@ -34614,7 +34614,7 @@ pub fn vpmin_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] @@ -34643,7 +34643,7 @@ pub fn vqabs_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s8"))] @@ -34672,7 +34672,7 @@ pub fn vqabsq_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] @@ -34701,7 +34701,7 @@ pub fn vqabs_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s16"))] @@ -34730,7 +34730,7 @@ pub fn vqabsq_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabs_s32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] @@ -34759,7 +34759,7 @@ pub fn vqabs_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Signed saturating Absolute value"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqabsq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqabs.s32"))] @@ -34788,7 +34788,7 @@ pub fn vqabsq_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] @@ -34809,7 +34809,7 @@ pub fn vqadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s8"))] @@ -34830,7 +34830,7 @@ pub fn vqaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] @@ -34851,7 +34851,7 @@ pub fn vqadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Saturating add"] #[doc 
= "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s16"))] @@ -34872,7 +34872,7 @@ pub fn vqaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] @@ -34893,7 +34893,7 @@ pub fn vqadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s32"))] @@ -34914,7 +34914,7 @@ pub fn vqaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.s64"))] @@ -34935,7 +34935,7 @@ pub fn vqadd_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr("vqadd.s64"))] @@ -34956,7 +34956,7 @@ pub fn vqaddq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] @@ -34977,7 +34977,7 @@ pub fn vqadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u8"))] @@ -34998,7 +34998,7 @@ pub fn vqaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] @@ -35019,7 +35019,7 @@ pub fn vqadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u16"))] @@ -35040,7 +35040,7 @@ pub fn vqaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] @@ -35061,7 +35061,7 @@ pub fn vqadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u32"))] @@ -35082,7 +35082,7 @@ pub fn vqaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqadd_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] @@ -35103,7 +35103,7 @@ pub fn vqadd_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Saturating add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqaddq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqadd.u64"))] @@ -35124,7 +35124,7 @@ pub fn vqaddq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 2))] @@ -35147,7 +35147,7 @@ pub fn vqdmlal_lane_s16(a: 
int32x4_t, b: int16x4_t, c: int16x4_t) } #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal, N = 1))] @@ -35170,7 +35170,7 @@ pub fn vqdmlal_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) } #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] @@ -35191,7 +35191,7 @@ pub fn vqdmlal_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { } #[doc = "Vector widening saturating doubling multiply accumulate with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] @@ -35212,7 +35212,7 @@ pub fn vqdmlal_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { } #[doc = "Signed saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] @@ -35233,7 +35233,7 @@ pub fn vqdmlal_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { } #[doc = "Signed 
saturating doubling multiply-add long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlal_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlal))] @@ -35254,7 +35254,7 @@ pub fn vqdmlal_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { } #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 2))] @@ -35277,7 +35277,7 @@ pub fn vqdmlsl_lane_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) } #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl, N = 1))] @@ -35300,7 +35300,7 @@ pub fn vqdmlsl_lane_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) } #[doc = "Vector widening saturating doubling multiply subtract with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] @@ -35321,7 +35321,7 @@ pub fn vqdmlsl_n_s16(a: int32x4_t, b: int16x4_t, c: i16) -> int32x4_t { } #[doc = "Vector widening saturating doubling multiply subtract with scalar"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] @@ -35342,7 +35342,7 @@ pub fn vqdmlsl_n_s32(a: int64x2_t, b: int32x2_t, c: i32) -> int64x2_t { } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] @@ -35363,7 +35363,7 @@ pub fn vqdmlsl_s16(a: int32x4_t, b: int16x4_t, c: int16x4_t) -> int32x4_t { } #[doc = "Signed saturating doubling multiply-subtract long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmlsl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmlsl))] @@ -35384,7 +35384,7 @@ pub fn vqdmlsl_s32(a: int64x2_t, b: int32x2_t, c: int32x2_t) -> int64x2_t { } #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] @@ -35407,7 +35407,7 @@ pub fn vqdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x4 } #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] @@ -35430,7 +35430,7 @@ pub fn vqdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16x } #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] @@ -35453,7 +35453,7 @@ pub fn vqdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x2 } #[doc = "Vector saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh, LANE = 0))] @@ -35476,7 +35476,7 @@ pub fn vqdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32x } #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] @@ -35498,7 +35498,7 @@ pub fn vqdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { } #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s16)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] @@ -35520,7 +35520,7 @@ pub fn vqdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { } #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] @@ -35542,7 +35542,7 @@ pub fn vqdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { } #[doc = "Vector saturating doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] @@ -35564,7 +35564,7 @@ pub fn vqdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] @@ -35593,7 +35593,7 @@ pub fn vqdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vqdmulh))] @@ -35622,7 +35622,7 @@ pub fn vqdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulh_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] @@ -35651,7 +35651,7 @@ pub fn vqdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Signed saturating doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmulhq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmulh))] @@ -35680,7 +35680,7 @@ pub fn vqdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 2))] @@ -35706,7 +35706,7 @@ pub fn vqdmull_lane_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { } #[doc = "Vector saturating doubling long multiply by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull, N = 1))] @@ -35732,7 +35732,7 @@ pub fn vqdmull_lane_s32(a: int32x2_t, b: 
int32x2_t) -> int64x2_t { } #[doc = "Vector saturating doubling long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] @@ -35753,7 +35753,7 @@ pub fn vqdmull_n_s16(a: int16x4_t, b: i16) -> int32x4_t { } #[doc = "Vector saturating doubling long multiply with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] @@ -35774,7 +35774,7 @@ pub fn vqdmull_n_s32(a: int32x2_t, b: i32) -> int64x2_t { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] @@ -35803,7 +35803,7 @@ pub fn vqdmull_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { } #[doc = "Signed saturating doubling multiply long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqdmull_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqdmull))] @@ -35832,7 +35832,7 @@ pub fn vqdmull_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { } #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] @@ -35861,7 +35861,7 @@ pub fn vqmovn_s16(a: int16x8_t) -> int8x8_t { } #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] @@ -35890,7 +35890,7 @@ pub fn vqmovn_s32(a: int32x4_t) -> int16x4_t { } #[doc = "Signed saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] @@ -35919,7 +35919,7 @@ pub fn vqmovn_s64(a: int64x2_t) -> int32x2_t { } #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] @@ -35948,7 +35948,7 @@ pub fn vqmovn_u16(a: uint16x8_t) -> uint8x8_t { } #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] @@ 
-35977,7 +35977,7 @@ pub fn vqmovn_u32(a: uint32x4_t) -> uint16x4_t { } #[doc = "Unsigned saturating extract narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovn_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovn))] @@ -36006,7 +36006,7 @@ pub fn vqmovn_u64(a: uint64x2_t) -> uint32x2_t { } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] @@ -36035,7 +36035,7 @@ pub fn vqmovun_s16(a: int16x8_t) -> uint8x8_t { } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] @@ -36064,7 +36064,7 @@ pub fn vqmovun_s32(a: int32x4_t) -> uint16x4_t { } #[doc = "Signed saturating extract unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqmovun_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqmovun))] @@ -36093,7 +36093,7 @@ pub fn vqmovun_s64(a: int64x2_t) -> uint32x2_t { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s8)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] @@ -36122,7 +36122,7 @@ pub fn vqneg_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s8"))] @@ -36151,7 +36151,7 @@ pub fn vqnegq_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] @@ -36180,7 +36180,7 @@ pub fn vqneg_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s16"))] @@ -36209,7 +36209,7 @@ pub fn vqnegq_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Signed saturating negate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqneg_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] @@ -36238,7 +36238,7 @@ pub fn vqneg_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Signed saturating negate"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqnegq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqneg.s32"))] @@ -36267,7 +36267,7 @@ pub fn vqnegq_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] @@ -36294,7 +36294,7 @@ pub fn vqrdmulh_lane_s16(a: int16x4_t, b: int16x4_t) -> int16x4 } #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] @@ -36320,7 +36320,7 @@ pub fn vqrdmulh_lane_s32(a: int32x2_t, b: int32x2_t) -> int32x2 } #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] @@ -36347,7 +36347,7 @@ pub fn vqrdmulh_laneq_s16(a: int16x4_t, b: int16x8_t) -> int16x } #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] @@ -36373,7 +36373,7 @@ pub fn vqrdmulh_laneq_s32(a: int32x2_t, b: int32x4_t) -> int32x } #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] @@ -36412,7 +36412,7 @@ pub fn vqrdmulhq_lane_s16(a: int16x8_t, b: int16x4_t) -> int16x } #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] @@ -36439,7 +36439,7 @@ pub fn vqrdmulhq_lane_s32(a: int32x4_t, b: int32x2_t) -> int32x } #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] @@ -36478,7 +36478,7 @@ pub fn vqrdmulhq_laneq_s16(a: int16x8_t, b: int16x8_t) -> int16 } #[doc = "Vector rounding saturating doubling multiply high by scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_laneq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh, LANE = 1))] @@ -36505,7 +36505,7 @@ pub fn vqrdmulhq_laneq_s32(a: int32x4_t, b: int32x4_t) -> int32 } #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] @@ -36526,7 +36526,7 @@ pub fn vqrdmulh_n_s16(a: int16x4_t, b: i16) -> int16x4_t { } #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] @@ -36547,7 +36547,7 @@ pub fn vqrdmulhq_n_s16(a: int16x8_t, b: i16) -> int16x8_t { } #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] @@ -36568,7 +36568,7 @@ pub fn vqrdmulh_n_s32(a: int32x2_t, b: i32) -> int32x2_t { } #[doc = "Vector saturating rounding doubling multiply high with scalar"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] @@ -36589,7 +36589,7 @@ pub fn vqrdmulhq_n_s32(a: int32x4_t, b: i32) -> int32x4_t { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] @@ -36618,7 +36618,7 @@ pub fn vqrdmulh_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] @@ -36647,7 +36647,7 @@ pub fn vqrdmulhq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulh_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] @@ -36676,7 +36676,7 @@ pub fn vqrdmulh_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Signed saturating rounding doubling multiply returning high half"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrdmulhq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrdmulh))] @@ -36705,7 +36705,7 @@ pub fn vqrdmulhq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36734,7 +36734,7 @@ pub fn vqrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36763,7 +36763,7 @@ pub fn vqrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36792,7 +36792,7 @@ pub fn vqrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36821,7 +36821,7 @@ pub fn vqrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36850,7 +36850,7 @@ pub fn vqrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36879,7 +36879,7 @@ pub fn vqrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36908,7 +36908,7 @@ pub fn vqrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36937,7 +36937,7 @@ pub fn vqrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Unsigned signed saturating rounding shift left"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36966,7 +36966,7 @@ pub fn vqrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -36995,7 +36995,7 @@ pub fn vqrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -37024,7 +37024,7 @@ pub fn vqrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -37053,7 +37053,7 @@ pub fn vqrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -37082,7 +37082,7 @@ pub fn vqrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -37111,7 +37111,7 @@ pub fn vqrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshl_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -37140,7 +37140,7 @@ pub fn vqrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Unsigned signed saturating rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshlq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqrshl))] @@ -37169,7 +37169,7 @@ pub fn vqrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] @@ -37185,7 +37185,7 @@ pub fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { } #[doc = "Signed 
saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] @@ -37201,7 +37201,7 @@ pub fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] @@ -37217,7 +37217,7 @@ pub fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqrshrn, N = 2))] @@ -37236,7 +37236,7 @@ pub fn vqrshrn_n_s16(a: int16x8_t) -> int8x8_t { } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqrshrn, N = 2))] @@ -37255,7 +37255,7 @@ pub fn vqrshrn_n_s32(a: int32x4_t) -> int16x4_t { } #[doc = "Signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqrshrn, N = 2))] @@ -37274,7 +37274,7 @@ pub fn vqrshrn_n_s64(a: int64x2_t) -> int32x2_t { } #[doc = "Unsigned signed saturating rounded shift 
right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] @@ -37290,7 +37290,7 @@ pub fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] @@ -37306,7 +37306,7 @@ pub fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrn, N = 2))] @@ -37322,7 +37322,7 @@ pub fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(uqrshrn, N = 2))] @@ -37341,7 +37341,7 @@ pub fn vqrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { } #[doc = "Unsigned signed saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(uqrshrn, N = 2))] @@ -37360,7 +37360,7 @@ pub fn vqrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { } #[doc = "Unsigned signed 
saturating rounded shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrn_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(uqrshrn, N = 2))] @@ -37379,7 +37379,7 @@ pub fn vqrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrun, N = 2))] @@ -37395,7 +37395,7 @@ pub fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrun, N = 2))] @@ -37411,7 +37411,7 @@ pub fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqrshrun, N = 2))] @@ -37427,7 +37427,7 @@ pub fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqrshrun, N = 2))] @@ -37446,7 +37446,7 @@ pub fn vqrshrun_n_s16(a: int16x8_t) -> uint8x8_t { } 
#[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqrshrun, N = 2))] @@ -37465,7 +37465,7 @@ pub fn vqrshrun_n_s32(a: int32x4_t) -> uint16x4_t { } #[doc = "Signed saturating rounded shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqrshrun_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqrshrun, N = 2))] @@ -37484,7 +37484,7 @@ pub fn vqrshrun_n_s64(a: int64x2_t) -> uint32x2_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37507,7 +37507,7 @@ pub fn vqshl_n_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37530,7 +37530,7 @@ pub fn vqshlq_n_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vqshl, N = 2))] @@ -37553,7 +37553,7 @@ pub fn vqshl_n_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37576,7 +37576,7 @@ pub fn vqshlq_n_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37599,7 +37599,7 @@ pub fn vqshl_n_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37622,7 +37622,7 @@ pub fn vqshlq_n_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37645,7 +37645,7 @@ pub fn vqshl_n_s64(a: int64x1_t) -> int64x1_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_s64)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37668,7 +37668,7 @@ pub fn vqshlq_n_s64(a: int64x2_t) -> int64x2_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37691,7 +37691,7 @@ pub fn vqshl_n_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37714,7 +37714,7 @@ pub fn vqshlq_n_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37737,7 +37737,7 @@ pub fn vqshl_n_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37760,7 +37760,7 @@ pub fn vqshlq_n_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = 
"Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37783,7 +37783,7 @@ pub fn vqshl_n_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37806,7 +37806,7 @@ pub fn vqshlq_n_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37829,7 +37829,7 @@ pub fn vqshl_n_u64(a: uint64x1_t) -> uint64x1_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl, N = 2))] @@ -37852,7 +37852,7 @@ pub fn vqshlq_n_u64(a: uint64x2_t) -> uint64x2_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -37881,7 +37881,7 @@ pub fn vqshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -37910,7 +37910,7 @@ pub fn vqshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -37939,7 +37939,7 @@ pub fn vqshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -37968,7 +37968,7 @@ pub fn vqshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -37997,7 +37997,7 @@ pub fn vqshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38026,7 +38026,7 @@ pub fn vqshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38055,7 +38055,7 @@ pub fn vqshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Signed saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38084,7 +38084,7 @@ pub fn vqshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38113,7 +38113,7 @@ pub fn vqshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vqshl))] @@ -38142,7 +38142,7 @@ pub fn vqshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38171,7 +38171,7 @@ pub fn vqshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38200,7 +38200,7 @@ pub fn vqshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38229,7 +38229,7 @@ pub fn vqshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38258,7 +38258,7 @@ pub fn vqshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshl_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38287,7 +38287,7 @@ pub fn vqshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Unsigned saturating shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vqshl))] @@ -38316,7 +38316,7 @@ pub fn vqshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] @@ -38332,7 +38332,7 @@ pub fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] @@ -38348,7 +38348,7 @@ pub fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] @@ -38364,7 +38364,7 @@ pub fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { } #[doc = "Signed saturating shift left 
unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] @@ -38380,7 +38380,7 @@ pub fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] @@ -38396,7 +38396,7 @@ pub fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] @@ -38412,7 +38412,7 @@ pub fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] @@ -38428,7 +38428,7 @@ pub fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshlu, N = 2))] @@ -38444,7 +38444,7 @@ pub fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] @@ -38463,7 +38463,7 @@ pub fn vqshlu_n_s8(a: int8x8_t) -> uint8x8_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] @@ -38482,7 +38482,7 @@ pub fn vqshluq_n_s8(a: int8x16_t) -> uint8x16_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] @@ -38501,7 +38501,7 @@ pub fn vqshlu_n_s16(a: int16x4_t) -> uint16x4_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] @@ -38520,7 +38520,7 @@ pub fn vqshluq_n_s16(a: int16x8_t) -> uint16x8_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] @@ -38539,7 +38539,7 @@ pub fn vqshlu_n_s32(a: int32x2_t) -> uint32x2_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] @@ -38558,7 +38558,7 @@ pub fn vqshluq_n_s32(a: int32x4_t) -> uint32x4_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshlu_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] @@ -38577,7 +38577,7 @@ pub fn vqshlu_n_s64(a: int64x1_t) -> uint64x1_t { } #[doc = "Signed saturating shift left unsigned"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshluq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshlu, N = 2))] @@ -38596,7 +38596,7 @@ pub fn vqshluq_n_s64(a: int64x2_t) -> uint64x2_t { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] @@ -38612,7 +38612,7 @@ pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] @@ -38628,7 +38628,7 @@ pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] @@ -38644,7 +38644,7 @@ pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshrn, N = 2))] @@ -38663,7 +38663,7 @@ pub fn vqshrn_n_s16(a: int16x8_t) -> int8x8_t { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshrn, N = 2))] @@ -38682,7 +38682,7 @@ pub fn vqshrn_n_s32(a: int32x4_t) -> int16x4_t { } #[doc = "Signed saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshrn, N = 2))] @@ -38701,7 +38701,7 @@ pub fn vqshrn_n_s64(a: int64x2_t) -> int32x2_t { } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] @@ -38717,7 +38717,7 @@ pub fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] @@ -38733,7 +38733,7 @@ pub fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrn, N = 2))] @@ -38749,7 +38749,7 @@ pub fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(uqshrn, N = 2))] @@ -38768,7 +38768,7 @@ pub fn vqshrn_n_u16(a: uint16x8_t) -> uint8x8_t { } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(uqshrn, N = 2))] @@ -38787,7 +38787,7 @@ pub fn vqshrn_n_u32(a: uint32x4_t) -> uint16x4_t { } #[doc = "Unsigned saturating shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrn_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(uqshrn, N = 2))] @@ -38806,7 +38806,7 @@ pub fn vqshrn_n_u64(a: uint64x2_t) -> uint32x2_t { } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrun, N = 2))] @@ -38822,7 +38822,7 @@ pub fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrun, N = 2))] @@ -38838,7 +38838,7 @@ pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vqshrun, N = 2))] @@ -38854,7 +38854,7 @@ pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshrun, N = 2))] @@ -38873,7 +38873,7 @@ pub fn vqshrun_n_s16(a: int16x8_t) -> uint8x8_t { } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshrun, N = 2))] @@ -38892,7 +38892,7 @@ pub fn vqshrun_n_s32(a: int32x4_t) -> uint16x4_t { } #[doc = "Signed saturating shift right unsigned narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqshrun_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(sqshrun, N = 2))] @@ -38911,7 +38911,7 @@ pub fn vqshrun_n_s64(a: int64x2_t) -> uint32x2_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] @@ -38932,7 +38932,7 @@ pub fn vqsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s8"))] @@ -38953,7 +38953,7 @@ pub fn vqsubq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] @@ -38974,7 +38974,7 @@ pub fn vqsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s16"))] @@ -38995,7 +38995,7 @@ pub fn vqsubq_s16(a: 
int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] @@ -39016,7 +39016,7 @@ pub fn vqsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s32"))] @@ -39037,7 +39037,7 @@ pub fn vqsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] @@ -39058,7 +39058,7 @@ pub fn vqsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.s64"))] @@ -39079,7 +39079,7 @@ pub fn vqsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] @@ -39100,7 +39100,7 @@ pub fn vqsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u8"))] @@ -39121,7 +39121,7 @@ pub fn vqsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] @@ -39142,7 +39142,7 @@ pub fn vqsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u16"))] @@ -39163,7 +39163,7 @@ pub fn vqsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] @@ -39184,7 +39184,7 @@ pub fn vqsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u32"))] @@ -39205,7 +39205,7 @@ pub fn vqsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsub_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] @@ -39226,7 +39226,7 @@ pub fn vqsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Saturating subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vqsubq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vqsub.u64"))] @@ -39247,7 +39247,7 @@ pub fn vqsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] @@ -39269,7 +39269,7 @@ pub fn vraddhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { } #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] @@ -39291,7 +39291,7 @@ pub fn vraddhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { } #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] @@ -39313,7 +39313,7 @@ pub fn vraddhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { } #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] @@ -39337,7 +39337,7 @@ pub fn vraddhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_ } #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] @@ -39361,7 +39361,7 @@ pub fn vraddhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8 } #[doc = "Rounding Add returning High Narrow (high half)."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_high_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] @@ -39385,7 +39385,7 @@ pub fn vraddhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4 } #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i16"))] @@ -39414,7 +39414,7 @@ pub fn vraddhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { } #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i32"))] @@ -39443,7 +39443,7 @@ pub fn vraddhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { } #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vraddhn.i64"))] @@ -39472,7 +39472,7 @@ pub fn vraddhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { } #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -39494,7 +39494,7 @@ pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { } #[doc = "Rounding Add 
returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -39521,7 +39521,7 @@ pub fn vraddhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { } #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -39543,7 +39543,7 @@ pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { } #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -39570,7 +39570,7 @@ pub fn vraddhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { } #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -39592,7 +39592,7 @@ pub fn vraddhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { } #[doc = "Rounding Add returning High Narrow."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vraddhn_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -39619,7 +39619,7 @@ pub fn vraddhn_u64(a: 
uint64x2_t, b: uint64x2_t) -> uint32x2_t { } #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( @@ -39649,7 +39649,7 @@ pub fn vrecpe_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] #[cfg_attr( @@ -39679,7 +39679,7 @@ pub fn vrecpeq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] @@ -39708,7 +39708,7 @@ pub fn vrecpe_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Reciprocal estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] @@ -39737,7 +39737,7 @@ pub fn vrecpeq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Unsigned reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpe_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vrecpe))] @@ -39766,7 +39766,7 @@ pub fn vrecpe_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Unsigned reciprocal estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpeq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecpe))] @@ -39795,7 +39795,7 @@ pub fn vrecpeq_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( @@ -39825,7 +39825,7 @@ pub fn vrecps_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] #[cfg_attr( @@ -39855,7 +39855,7 @@ pub fn vrecpsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecps_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] @@ -39884,7 +39884,7 @@ pub fn vrecps_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Floating-point reciprocal step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrecpsq_f32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrecps))] @@ -39913,7 +39913,7 @@ pub fn vrecpsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -39936,7 +39936,7 @@ pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -39963,7 +39963,7 @@ pub fn vreinterpret_f32_f16(a: float16x4_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -39986,7 +39986,7 @@ pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40013,7 +40013,7 @@ pub fn vreinterpret_s8_f16(a: float16x4_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40036,7 +40036,7 @@ pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40063,7 +40063,7 @@ pub fn vreinterpret_s16_f16(a: float16x4_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40086,7 +40086,7 @@ pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40113,7 +40113,7 @@ pub fn vreinterpret_s32_f16(a: float16x4_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40136,7 +40136,7 @@ pub fn vreinterpret_s64_f16(a: float16x4_t) -> 
int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40162,7 +40162,7 @@ pub fn vreinterpret_s64_f16(a: float16x4_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40185,7 +40185,7 @@ pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40212,7 +40212,7 @@ pub fn vreinterpret_u8_f16(a: float16x4_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40235,7 +40235,7 @@ pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-40262,7 +40262,7 @@ pub fn vreinterpret_u16_f16(a: float16x4_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40285,7 +40285,7 @@ pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40312,7 +40312,7 @@ pub fn vreinterpret_u32_f16(a: float16x4_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40335,7 +40335,7 @@ pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40361,7 +40361,7 @@ pub fn vreinterpret_u64_f16(a: float16x4_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40384,7 +40384,7 @@ pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40411,7 +40411,7 @@ pub fn vreinterpret_p8_f16(a: float16x4_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40434,7 +40434,7 @@ pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40461,7 +40461,7 @@ pub fn vreinterpret_p16_f16(a: float16x4_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40484,7 +40484,7 @@ pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_f16)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40511,7 +40511,7 @@ pub fn vreinterpretq_f32_f16(a: float16x8_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40534,7 +40534,7 @@ pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40565,7 +40565,7 @@ pub fn vreinterpretq_s8_f16(a: float16x8_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40588,7 +40588,7 @@ pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40615,7 +40615,7 @@ pub fn vreinterpretq_s16_f16(a: float16x8_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40638,7 +40638,7 @@ pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40665,7 +40665,7 @@ pub fn vreinterpretq_s32_f16(a: float16x8_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40688,7 +40688,7 @@ pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40715,7 +40715,7 @@ pub fn vreinterpretq_s64_f16(a: float16x8_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40738,7 +40738,7 @@ pub fn vreinterpretq_u8_f16(a: float16x8_t) 
-> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40769,7 +40769,7 @@ pub fn vreinterpretq_u8_f16(a: float16x8_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40792,7 +40792,7 @@ pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40819,7 +40819,7 @@ pub fn vreinterpretq_u16_f16(a: float16x8_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40842,7 +40842,7 @@ pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable 
= "v7"))] @@ -40869,7 +40869,7 @@ pub fn vreinterpretq_u32_f16(a: float16x8_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40892,7 +40892,7 @@ pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40919,7 +40919,7 @@ pub fn vreinterpretq_u64_f16(a: float16x8_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40942,7 +40942,7 @@ pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40973,7 +40973,7 @@ pub fn vreinterpretq_p8_f16(a: float16x8_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -40996,7 +40996,7 @@ pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41023,7 +41023,7 @@ pub fn vreinterpretq_p16_f16(a: float16x8_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41046,7 +41046,7 @@ pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41073,7 +41073,7 @@ pub fn vreinterpret_f16_f32(a: float32x2_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41096,7 +41096,7 @@ pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41123,7 +41123,7 @@ pub fn vreinterpretq_f16_f32(a: float32x4_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41146,7 +41146,7 @@ pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41173,7 +41173,7 @@ pub fn vreinterpret_f16_s8(a: int8x8_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41196,7 +41196,7 @@ pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41224,7 +41224,7 @@ pub fn vreinterpretq_f16_s8(a: int8x16_t) -> float16x8_t 
{ } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41247,7 +41247,7 @@ pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41274,7 +41274,7 @@ pub fn vreinterpret_f16_s16(a: int16x4_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41297,7 +41297,7 @@ pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41324,7 +41324,7 @@ pub fn vreinterpretq_f16_s16(a: int16x8_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-41347,7 +41347,7 @@ pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41374,7 +41374,7 @@ pub fn vreinterpret_f16_s32(a: int32x2_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41397,7 +41397,7 @@ pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41424,7 +41424,7 @@ pub fn vreinterpretq_f16_s32(a: int32x4_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41447,7 +41447,7 @@ pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41473,7 +41473,7 @@ pub fn vreinterpret_f16_s64(a: int64x1_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41496,7 +41496,7 @@ pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41523,7 +41523,7 @@ pub fn vreinterpretq_f16_s64(a: int64x2_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41546,7 +41546,7 @@ pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41573,7 +41573,7 @@ pub fn vreinterpret_f16_u8(a: uint8x8_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] -#[inline(always)] 
+#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41596,7 +41596,7 @@ pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41624,7 +41624,7 @@ pub fn vreinterpretq_f16_u8(a: uint8x16_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41647,7 +41647,7 @@ pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41674,7 +41674,7 @@ pub fn vreinterpret_f16_u16(a: uint16x4_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41697,7 +41697,7 @@ pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41724,7 +41724,7 @@ pub fn vreinterpretq_f16_u16(a: uint16x8_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41747,7 +41747,7 @@ pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41774,7 +41774,7 @@ pub fn vreinterpret_f16_u32(a: uint32x2_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41797,7 +41797,7 @@ pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41824,7 +41824,7 @@ pub fn vreinterpretq_f16_u32(a: uint32x4_t) -> 
float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41847,7 +41847,7 @@ pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41873,7 +41873,7 @@ pub fn vreinterpret_f16_u64(a: uint64x1_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41896,7 +41896,7 @@ pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41923,7 +41923,7 @@ pub fn vreinterpretq_f16_u64(a: uint64x2_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] @@ -41946,7 +41946,7 @@ pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41973,7 +41973,7 @@ pub fn vreinterpret_f16_p8(a: poly8x8_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -41996,7 +41996,7 @@ pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42024,7 +42024,7 @@ pub fn vreinterpretq_f16_p8(a: poly8x16_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42047,7 +42047,7 @@ pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable 
= "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42074,7 +42074,7 @@ pub fn vreinterpret_f16_p16(a: poly16x4_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42097,7 +42097,7 @@ pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42124,7 +42124,7 @@ pub fn vreinterpretq_f16_p16(a: poly16x8_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42147,7 +42147,7 @@ pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42173,7 +42173,7 @@ pub fn vreinterpretq_f16_p128(a: p128) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42196,7 +42196,7 @@ pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42222,7 +42222,7 @@ pub fn vreinterpret_p64_f16(a: float16x4_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42245,7 +42245,7 @@ pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42271,7 +42271,7 @@ pub fn vreinterpretq_p128_f16(a: float16x8_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42294,7 +42294,7 @@ pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_f16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42321,7 +42321,7 @@ pub fn vreinterpretq_p64_f16(a: float16x8_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42344,7 +42344,7 @@ pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42370,7 +42370,7 @@ pub fn vreinterpret_f16_p64(a: poly64x1_t) -> float16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42393,7 +42393,7 @@ pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -42420,7 +42420,7 @@ pub fn vreinterpretq_f16_p64(a: poly64x2_t) -> 
float16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42442,7 +42442,7 @@ pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42467,7 +42467,7 @@ pub fn vreinterpretq_f32_p128(a: p128) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42489,7 +42489,7 @@ pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42515,7 +42515,7 @@ pub fn vreinterpret_s8_f32(a: float32x2_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-42537,7 +42537,7 @@ pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42563,7 +42563,7 @@ pub fn vreinterpret_s16_f32(a: float32x2_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42585,7 +42585,7 @@ pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42611,7 +42611,7 @@ pub fn vreinterpret_s32_f32(a: float32x2_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42633,7 +42633,7 @@ pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42658,7 +42658,7 @@ pub fn vreinterpret_s64_f32(a: float32x2_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42680,7 +42680,7 @@ pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42706,7 +42706,7 @@ pub fn vreinterpret_u8_f32(a: float32x2_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42728,7 +42728,7 @@ pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42754,7 +42754,7 @@ pub fn vreinterpret_u16_f32(a: float32x2_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42776,7 +42776,7 @@ pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42802,7 +42802,7 @@ pub fn vreinterpret_u32_f32(a: float32x2_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42824,7 +42824,7 @@ pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42849,7 +42849,7 @@ pub fn vreinterpret_u64_f32(a: float32x2_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42871,7 +42871,7 @@ pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42897,7 +42897,7 @@ pub fn vreinterpret_p8_f32(a: float32x2_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42919,7 +42919,7 @@ pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42945,7 +42945,7 @@ pub fn vreinterpret_p16_f32(a: float32x2_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42967,7 +42967,7 @@ pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -42992,7 +42992,7 @@ pub fn vreinterpretq_p128_f32(a: float32x4_t) -> p128 
{ } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43014,7 +43014,7 @@ pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43044,7 +43044,7 @@ pub fn vreinterpretq_s8_f32(a: float32x4_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43066,7 +43066,7 @@ pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43092,7 +43092,7 @@ pub fn vreinterpretq_s16_f32(a: float32x4_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-43114,7 +43114,7 @@ pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43140,7 +43140,7 @@ pub fn vreinterpretq_s32_f32(a: float32x4_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43162,7 +43162,7 @@ pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43188,7 +43188,7 @@ pub fn vreinterpretq_s64_f32(a: float32x4_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43210,7 +43210,7 @@ pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43240,7 +43240,7 @@ pub fn vreinterpretq_u8_f32(a: float32x4_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43262,7 +43262,7 @@ pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43288,7 +43288,7 @@ pub fn vreinterpretq_u16_f32(a: float32x4_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43310,7 +43310,7 @@ pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43336,7 +43336,7 @@ pub fn vreinterpretq_u32_f32(a: float32x4_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43358,7 +43358,7 @@ pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43384,7 +43384,7 @@ pub fn vreinterpretq_u64_f32(a: float32x4_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43406,7 +43406,7 @@ pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43436,7 +43436,7 @@ pub fn vreinterpretq_p8_f32(a: float32x4_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43458,7 +43458,7 @@ pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_f32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43484,7 +43484,7 @@ pub fn vreinterpretq_p16_f32(a: float32x4_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43506,7 +43506,7 @@ pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43532,7 +43532,7 @@ pub fn vreinterpret_f32_s8(a: int8x8_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43554,7 +43554,7 @@ pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43580,7 +43580,7 @@ pub fn vreinterpret_s16_s8(a: int8x8_t) -> int16x4_t { } #[doc = 
"Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43602,7 +43602,7 @@ pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43628,7 +43628,7 @@ pub fn vreinterpret_s32_s8(a: int8x8_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43650,7 +43650,7 @@ pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43675,7 +43675,7 @@ pub fn vreinterpret_s64_s8(a: int8x8_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43697,7 +43697,7 @@ pub fn 
vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43723,7 +43723,7 @@ pub fn vreinterpret_u8_s8(a: int8x8_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43745,7 +43745,7 @@ pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43771,7 +43771,7 @@ pub fn vreinterpret_u16_s8(a: int8x8_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43793,7 +43793,7 @@ pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable 
= "v7"))] @@ -43819,7 +43819,7 @@ pub fn vreinterpret_u32_s8(a: int8x8_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43841,7 +43841,7 @@ pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43866,7 +43866,7 @@ pub fn vreinterpret_u64_s8(a: int8x8_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43888,7 +43888,7 @@ pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43914,7 +43914,7 @@ pub fn vreinterpret_p8_s8(a: int8x8_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43936,7 +43936,7 @@ pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43962,7 +43962,7 @@ pub fn vreinterpret_p16_s8(a: int8x8_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -43984,7 +43984,7 @@ pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44011,7 +44011,7 @@ pub fn vreinterpretq_f32_s8(a: int8x16_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44033,7 +44033,7 @@ pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s8)"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44060,7 +44060,7 @@ pub fn vreinterpretq_s16_s8(a: int8x16_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44082,7 +44082,7 @@ pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44109,7 +44109,7 @@ pub fn vreinterpretq_s32_s8(a: int8x16_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44131,7 +44131,7 @@ pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44158,7 +44158,7 @@ pub fn vreinterpretq_s64_s8(a: int8x16_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44180,7 +44180,7 @@ pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44211,7 +44211,7 @@ pub fn vreinterpretq_u8_s8(a: int8x16_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44233,7 +44233,7 @@ pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44260,7 +44260,7 @@ pub fn vreinterpretq_u16_s8(a: int8x16_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44282,7 +44282,7 @@ pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { } 
#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44309,7 +44309,7 @@ pub fn vreinterpretq_u32_s8(a: int8x16_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44331,7 +44331,7 @@ pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44358,7 +44358,7 @@ pub fn vreinterpretq_u64_s8(a: int8x16_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44380,7 +44380,7 @@ pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44411,7 +44411,7 @@ 
pub fn vreinterpretq_p8_s8(a: int8x16_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44433,7 +44433,7 @@ pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44460,7 +44460,7 @@ pub fn vreinterpretq_p16_s8(a: int8x16_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44482,7 +44482,7 @@ pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44508,7 +44508,7 @@ pub fn vreinterpret_f32_s16(a: int16x4_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch 
= "arm", target_feature(enable = "v7"))] @@ -44530,7 +44530,7 @@ pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44556,7 +44556,7 @@ pub fn vreinterpret_s8_s16(a: int16x4_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44578,7 +44578,7 @@ pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44604,7 +44604,7 @@ pub fn vreinterpret_s32_s16(a: int16x4_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44626,7 +44626,7 @@ pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44651,7 +44651,7 @@ pub fn vreinterpret_s64_s16(a: int16x4_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44673,7 +44673,7 @@ pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44699,7 +44699,7 @@ pub fn vreinterpret_u8_s16(a: int16x4_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44721,7 +44721,7 @@ pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44747,7 +44747,7 @@ pub fn vreinterpret_u16_s16(a: int16x4_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44769,7 +44769,7 @@ pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44795,7 +44795,7 @@ pub fn vreinterpret_u32_s16(a: int16x4_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44817,7 +44817,7 @@ pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44842,7 +44842,7 @@ pub fn vreinterpret_u64_s16(a: int16x4_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44864,7 +44864,7 @@ pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44890,7 +44890,7 @@ pub fn vreinterpret_p8_s16(a: int16x4_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44912,7 +44912,7 @@ pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44938,7 +44938,7 @@ pub fn vreinterpret_p16_s16(a: int16x4_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44960,7 +44960,7 @@ pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -44986,7 +44986,7 @@ pub fn vreinterpretq_f32_s16(a: int16x8_t) -> float32x4_t 
{ } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45008,7 +45008,7 @@ pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45038,7 +45038,7 @@ pub fn vreinterpretq_s8_s16(a: int16x8_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45060,7 +45060,7 @@ pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45086,7 +45086,7 @@ pub fn vreinterpretq_s32_s16(a: int16x8_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45108,7 
+45108,7 @@ pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45134,7 +45134,7 @@ pub fn vreinterpretq_s64_s16(a: int16x8_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45156,7 +45156,7 @@ pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45186,7 +45186,7 @@ pub fn vreinterpretq_u8_s16(a: int16x8_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45208,7 +45208,7 @@ pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45234,7 +45234,7 @@ pub fn vreinterpretq_u16_s16(a: int16x8_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45256,7 +45256,7 @@ pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45282,7 +45282,7 @@ pub fn vreinterpretq_u32_s16(a: int16x8_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45304,7 +45304,7 @@ pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45330,7 +45330,7 @@ pub fn vreinterpretq_u64_s16(a: int16x8_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] -#[inline(always)] 
+#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45352,7 +45352,7 @@ pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45382,7 +45382,7 @@ pub fn vreinterpretq_p8_s16(a: int16x8_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45404,7 +45404,7 @@ pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45430,7 +45430,7 @@ pub fn vreinterpretq_p16_s16(a: int16x8_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45452,7 +45452,7 @@ pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45478,7 +45478,7 @@ pub fn vreinterpret_f32_s32(a: int32x2_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45500,7 +45500,7 @@ pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45526,7 +45526,7 @@ pub fn vreinterpret_s8_s32(a: int32x2_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45548,7 +45548,7 @@ pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45574,7 +45574,7 @@ pub fn vreinterpret_s16_s32(a: int32x2_t) -> int16x4_t { } #[doc = 
"Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45596,7 +45596,7 @@ pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45621,7 +45621,7 @@ pub fn vreinterpret_s64_s32(a: int32x2_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45643,7 +45643,7 @@ pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45669,7 +45669,7 @@ pub fn vreinterpret_u8_s32(a: int32x2_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45691,7 +45691,7 @@ pub fn 
vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45717,7 +45717,7 @@ pub fn vreinterpret_u16_s32(a: int32x2_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45739,7 +45739,7 @@ pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45765,7 +45765,7 @@ pub fn vreinterpret_u32_s32(a: int32x2_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45787,7 +45787,7 @@ pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] @@ -45812,7 +45812,7 @@ pub fn vreinterpret_u64_s32(a: int32x2_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45834,7 +45834,7 @@ pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45860,7 +45860,7 @@ pub fn vreinterpret_p8_s32(a: int32x2_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45882,7 +45882,7 @@ pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45908,7 +45908,7 @@ pub fn vreinterpret_p16_s32(a: int32x2_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45930,7 +45930,7 @@ pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45956,7 +45956,7 @@ pub fn vreinterpretq_f32_s32(a: int32x4_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -45978,7 +45978,7 @@ pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46008,7 +46008,7 @@ pub fn vreinterpretq_s8_s32(a: int32x4_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46030,7 +46030,7 @@ pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46056,7 +46056,7 @@ pub fn vreinterpretq_s16_s32(a: int32x4_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46078,7 +46078,7 @@ pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46104,7 +46104,7 @@ pub fn vreinterpretq_s64_s32(a: int32x4_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46126,7 +46126,7 @@ pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46156,7 +46156,7 @@ pub fn vreinterpretq_u8_s32(a: int32x4_t) -> uint8x16_t 
{ } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46178,7 +46178,7 @@ pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46204,7 +46204,7 @@ pub fn vreinterpretq_u16_s32(a: int32x4_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46226,7 +46226,7 @@ pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46252,7 +46252,7 @@ pub fn vreinterpretq_u32_s32(a: int32x4_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-46274,7 +46274,7 @@ pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46300,7 +46300,7 @@ pub fn vreinterpretq_u64_s32(a: int32x4_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46322,7 +46322,7 @@ pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46352,7 +46352,7 @@ pub fn vreinterpretq_p8_s32(a: int32x4_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46374,7 +46374,7 @@ pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46400,7 +46400,7 @@ pub fn vreinterpretq_p16_s32(a: int32x4_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46422,7 +46422,7 @@ pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46447,7 +46447,7 @@ pub fn vreinterpret_f32_s64(a: int64x1_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46469,7 +46469,7 @@ pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46494,7 +46494,7 @@ pub fn vreinterpret_s8_s64(a: int64x1_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46516,7 +46516,7 @@ pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46541,7 +46541,7 @@ pub fn vreinterpret_s16_s64(a: int64x1_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46563,7 +46563,7 @@ pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46588,7 +46588,7 @@ pub fn vreinterpret_s32_s64(a: int64x1_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46610,7 +46610,7 @@ pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46635,7 +46635,7 @@ pub fn vreinterpret_u8_s64(a: int64x1_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46657,7 +46657,7 @@ pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46682,7 +46682,7 @@ pub fn vreinterpret_u16_s64(a: int64x1_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46704,7 +46704,7 @@ pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46729,7 +46729,7 @@ pub fn vreinterpret_u32_s64(a: int64x1_t) -> uint32x2_t { } 
#[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -46750,7 +46750,7 @@ pub fn vreinterpret_u64_s64(a: int64x1_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46772,7 +46772,7 @@ pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46797,7 +46797,7 @@ pub fn vreinterpret_p8_s64(a: int64x1_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46819,7 +46819,7 @@ pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] @@ -46844,7 +46844,7 @@ pub fn vreinterpret_p16_s64(a: int64x1_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46866,7 +46866,7 @@ pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46892,7 +46892,7 @@ pub fn vreinterpretq_f32_s64(a: int64x2_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46914,7 +46914,7 @@ pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46944,7 +46944,7 @@ pub fn vreinterpretq_s8_s64(a: int64x2_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46966,7 +46966,7 @@ pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -46992,7 +46992,7 @@ pub fn vreinterpretq_s16_s64(a: int64x2_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47014,7 +47014,7 @@ pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47040,7 +47040,7 @@ pub fn vreinterpretq_s32_s64(a: int64x2_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47062,7 +47062,7 @@ pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47092,7 +47092,7 @@ pub fn vreinterpretq_u8_s64(a: int64x2_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47114,7 +47114,7 @@ pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47140,7 +47140,7 @@ pub fn vreinterpretq_u16_s64(a: int64x2_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47162,7 +47162,7 @@ pub fn vreinterpretq_u32_s64(a: int64x2_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47188,7 +47188,7 @@ pub fn vreinterpretq_u32_s64(a: int64x2_t) -> 
uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47210,7 +47210,7 @@ pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47236,7 +47236,7 @@ pub fn vreinterpretq_u64_s64(a: int64x2_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47258,7 +47258,7 @@ pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47288,7 +47288,7 @@ pub fn vreinterpretq_p8_s64(a: int64x2_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
@@ -47310,7 +47310,7 @@ pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47336,7 +47336,7 @@ pub fn vreinterpretq_p16_s64(a: int64x2_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47358,7 +47358,7 @@ pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47384,7 +47384,7 @@ pub fn vreinterpret_f32_u8(a: uint8x8_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47406,7 +47406,7 @@ pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47432,7 +47432,7 @@ pub fn vreinterpret_s8_u8(a: uint8x8_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47454,7 +47454,7 @@ pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47480,7 +47480,7 @@ pub fn vreinterpret_s16_u8(a: uint8x8_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47502,7 +47502,7 @@ pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47528,7 +47528,7 @@ pub fn vreinterpret_s32_u8(a: uint8x8_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian 
= "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47550,7 +47550,7 @@ pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47575,7 +47575,7 @@ pub fn vreinterpret_s64_u8(a: uint8x8_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47597,7 +47597,7 @@ pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47623,7 +47623,7 @@ pub fn vreinterpret_u16_u8(a: uint8x8_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47645,7 +47645,7 @@ pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47671,7 +47671,7 @@ pub fn vreinterpret_u32_u8(a: uint8x8_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47693,7 +47693,7 @@ pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47718,7 +47718,7 @@ pub fn vreinterpret_u64_u8(a: uint8x8_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47740,7 +47740,7 @@ pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47766,7 +47766,7 @@ pub fn vreinterpret_p8_u8(a: uint8x8_t) -> poly8x8_t { } #[doc = "Vector 
reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47788,7 +47788,7 @@ pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47814,7 +47814,7 @@ pub fn vreinterpret_p16_u8(a: uint8x8_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47836,7 +47836,7 @@ pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47863,7 +47863,7 @@ pub fn vreinterpretq_f32_u8(a: uint8x16_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47885,7 +47885,7 @@ pub fn 
vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47916,7 +47916,7 @@ pub fn vreinterpretq_s8_u8(a: uint8x16_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47938,7 +47938,7 @@ pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47965,7 +47965,7 @@ pub fn vreinterpretq_s16_u8(a: uint8x16_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -47987,7 +47987,7 @@ pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] @@ -48014,7 +48014,7 @@ pub fn vreinterpretq_s32_u8(a: uint8x16_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48036,7 +48036,7 @@ pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48063,7 +48063,7 @@ pub fn vreinterpretq_s64_u8(a: uint8x16_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48085,7 +48085,7 @@ pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48112,7 +48112,7 @@ pub fn vreinterpretq_u16_u8(a: uint8x16_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48134,7 +48134,7 @@ pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48161,7 +48161,7 @@ pub fn vreinterpretq_u32_u8(a: uint8x16_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48183,7 +48183,7 @@ pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48210,7 +48210,7 @@ pub fn vreinterpretq_u64_u8(a: uint8x16_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48232,7 +48232,7 @@ pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48263,7 +48263,7 @@ pub fn vreinterpretq_p8_u8(a: uint8x16_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48285,7 +48285,7 @@ pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48312,7 +48312,7 @@ pub fn vreinterpretq_p16_u8(a: uint8x16_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48334,7 +48334,7 @@ pub fn vreinterpret_f32_u16(a: uint16x4_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48360,7 +48360,7 @@ pub fn vreinterpret_f32_u16(a: uint16x4_t) -> 
float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48382,7 +48382,7 @@ pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48408,7 +48408,7 @@ pub fn vreinterpret_s8_u16(a: uint16x4_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48430,7 +48430,7 @@ pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48456,7 +48456,7 @@ pub fn vreinterpret_s16_u16(a: uint16x4_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-48478,7 +48478,7 @@ pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48504,7 +48504,7 @@ pub fn vreinterpret_s32_u16(a: uint16x4_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48526,7 +48526,7 @@ pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48551,7 +48551,7 @@ pub fn vreinterpret_s64_u16(a: uint16x4_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48573,7 +48573,7 @@ pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48599,7 +48599,7 @@ pub fn vreinterpret_u8_u16(a: uint16x4_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48621,7 +48621,7 @@ pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48647,7 +48647,7 @@ pub fn vreinterpret_u32_u16(a: uint16x4_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48669,7 +48669,7 @@ pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48694,7 +48694,7 @@ pub fn vreinterpret_u64_u16(a: uint16x4_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48716,7 +48716,7 @@ pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48742,7 +48742,7 @@ pub fn vreinterpret_p8_u16(a: uint16x4_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48764,7 +48764,7 @@ pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48790,7 +48790,7 @@ pub fn vreinterpret_p16_u16(a: uint16x4_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48812,7 +48812,7 @@ pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48838,7 +48838,7 @@ pub fn vreinterpretq_f32_u16(a: uint16x8_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48860,7 +48860,7 @@ pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48890,7 +48890,7 @@ pub fn vreinterpretq_s8_u16(a: uint16x8_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48912,7 +48912,7 @@ pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48938,7 +48938,7 @@ pub fn vreinterpretq_s16_u16(a: uint16x8_t) -> 
int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48960,7 +48960,7 @@ pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -48986,7 +48986,7 @@ pub fn vreinterpretq_s32_u16(a: uint16x8_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49008,7 +49008,7 @@ pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49034,7 +49034,7 @@ pub fn vreinterpretq_s64_u16(a: uint16x8_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] @@ -49056,7 +49056,7 @@ pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49086,7 +49086,7 @@ pub fn vreinterpretq_u8_u16(a: uint16x8_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49108,7 +49108,7 @@ pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49134,7 +49134,7 @@ pub fn vreinterpretq_u32_u16(a: uint16x8_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49156,7 +49156,7 @@ pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49182,7 +49182,7 @@ pub fn vreinterpretq_u64_u16(a: uint16x8_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49204,7 +49204,7 @@ pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49234,7 +49234,7 @@ pub fn vreinterpretq_p8_u16(a: uint16x8_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49256,7 +49256,7 @@ pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49282,7 +49282,7 @@ pub fn vreinterpretq_p16_u16(a: uint16x8_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49304,7 +49304,7 @@ pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49330,7 +49330,7 @@ pub fn vreinterpret_f32_u32(a: uint32x2_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49352,7 +49352,7 @@ pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49378,7 +49378,7 @@ pub fn vreinterpret_s8_u32(a: uint32x2_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49400,7 +49400,7 @@ pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { 
} #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49426,7 +49426,7 @@ pub fn vreinterpret_s16_u32(a: uint32x2_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49448,7 +49448,7 @@ pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49474,7 +49474,7 @@ pub fn vreinterpret_s32_u32(a: uint32x2_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49496,7 +49496,7 @@ pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49521,7 +49521,7 
@@ pub fn vreinterpret_s64_u32(a: uint32x2_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49543,7 +49543,7 @@ pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49569,7 +49569,7 @@ pub fn vreinterpret_u8_u32(a: uint32x2_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49591,7 +49591,7 @@ pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49617,7 +49617,7 @@ pub fn vreinterpret_u16_u32(a: uint32x2_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch 
= "arm", target_feature(enable = "v7"))] @@ -49639,7 +49639,7 @@ pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49664,7 +49664,7 @@ pub fn vreinterpret_u64_u32(a: uint32x2_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49686,7 +49686,7 @@ pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49712,7 +49712,7 @@ pub fn vreinterpret_p8_u32(a: uint32x2_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49734,7 +49734,7 @@ pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = 
"big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49760,7 +49760,7 @@ pub fn vreinterpret_p16_u32(a: uint32x2_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49782,7 +49782,7 @@ pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49808,7 +49808,7 @@ pub fn vreinterpretq_f32_u32(a: uint32x4_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49830,7 +49830,7 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49860,7 +49860,7 @@ pub fn vreinterpretq_s8_u32(a: uint32x4_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49882,7 +49882,7 @@ pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49908,7 +49908,7 @@ pub fn vreinterpretq_s16_u32(a: uint32x4_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49930,7 +49930,7 @@ pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49956,7 +49956,7 @@ pub fn vreinterpretq_s32_u32(a: uint32x4_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -49978,7 +49978,7 @@ pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> 
int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50004,7 +50004,7 @@ pub fn vreinterpretq_s64_u32(a: uint32x4_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50026,7 +50026,7 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50056,7 +50056,7 @@ pub fn vreinterpretq_u8_u32(a: uint32x4_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50078,7 +50078,7 @@ pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
@@ -50104,7 +50104,7 @@ pub fn vreinterpretq_u16_u32(a: uint32x4_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50126,7 +50126,7 @@ pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50152,7 +50152,7 @@ pub fn vreinterpretq_u64_u32(a: uint32x4_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50174,7 +50174,7 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50204,7 +50204,7 @@ pub fn vreinterpretq_p8_u32(a: uint32x4_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable 
= "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50226,7 +50226,7 @@ pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50252,7 +50252,7 @@ pub fn vreinterpretq_p16_u32(a: uint32x4_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50274,7 +50274,7 @@ pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50299,7 +50299,7 @@ pub fn vreinterpret_f32_u64(a: uint64x1_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50321,7 +50321,7 @@ pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_u64)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50346,7 +50346,7 @@ pub fn vreinterpret_s8_u64(a: uint64x1_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50368,7 +50368,7 @@ pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50393,7 +50393,7 @@ pub fn vreinterpret_s16_u64(a: uint64x1_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50415,7 +50415,7 @@ pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50440,7 +50440,7 @@ pub fn vreinterpret_s32_u64(a: uint64x1_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -50461,7 +50461,7 @@ pub fn vreinterpret_s64_u64(a: uint64x1_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50483,7 +50483,7 @@ pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50508,7 +50508,7 @@ pub fn vreinterpret_u8_u64(a: uint64x1_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50530,7 +50530,7 @@ pub fn vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50555,7 +50555,7 @@ pub fn 
vreinterpret_u16_u64(a: uint64x1_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50577,7 +50577,7 @@ pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50602,7 +50602,7 @@ pub fn vreinterpret_u32_u64(a: uint64x1_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50624,7 +50624,7 @@ pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50649,7 +50649,7 @@ pub fn vreinterpret_p8_u64(a: uint64x1_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] @@ -50671,7 +50671,7 @@ pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50696,7 +50696,7 @@ pub fn vreinterpret_p16_u64(a: uint64x1_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50718,7 +50718,7 @@ pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50744,7 +50744,7 @@ pub fn vreinterpretq_f32_u64(a: uint64x2_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50766,7 +50766,7 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = 
"big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50796,7 +50796,7 @@ pub fn vreinterpretq_s8_u64(a: uint64x2_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50818,7 +50818,7 @@ pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50844,7 +50844,7 @@ pub fn vreinterpretq_s16_u64(a: uint64x2_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50866,7 +50866,7 @@ pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50892,7 +50892,7 @@ pub fn vreinterpretq_s32_u64(a: uint64x2_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50914,7 +50914,7 @@ pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50940,7 +50940,7 @@ pub fn vreinterpretq_s64_u64(a: uint64x2_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50962,7 +50962,7 @@ pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -50992,7 +50992,7 @@ pub fn vreinterpretq_u8_u64(a: uint64x2_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51014,7 +51014,7 @@ pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> 
uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51040,7 +51040,7 @@ pub fn vreinterpretq_u16_u64(a: uint64x2_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51062,7 +51062,7 @@ pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51088,7 +51088,7 @@ pub fn vreinterpretq_u32_u64(a: uint64x2_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51110,7 +51110,7 @@ pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] @@ -51140,7 +51140,7 @@ pub fn vreinterpretq_p8_u64(a: uint64x2_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51162,7 +51162,7 @@ pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51188,7 +51188,7 @@ pub fn vreinterpretq_p16_u64(a: uint64x2_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51210,7 +51210,7 @@ pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51236,7 +51236,7 @@ pub fn vreinterpret_f32_p8(a: poly8x8_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable 
= "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51258,7 +51258,7 @@ pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51284,7 +51284,7 @@ pub fn vreinterpret_s8_p8(a: poly8x8_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51306,7 +51306,7 @@ pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51332,7 +51332,7 @@ pub fn vreinterpret_s16_p8(a: poly8x8_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51354,7 +51354,7 @@ pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p8)"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51380,7 +51380,7 @@ pub fn vreinterpret_s32_p8(a: poly8x8_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51402,7 +51402,7 @@ pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51427,7 +51427,7 @@ pub fn vreinterpret_s64_p8(a: poly8x8_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51449,7 +51449,7 @@ pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51475,7 +51475,7 @@ pub fn vreinterpret_u8_p8(a: poly8x8_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51497,7 +51497,7 @@ pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51523,7 +51523,7 @@ pub fn vreinterpret_u16_p8(a: poly8x8_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51545,7 +51545,7 @@ pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51571,7 +51571,7 @@ pub fn vreinterpret_u32_p8(a: poly8x8_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51593,7 +51593,7 @@ pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { } #[doc 
= "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51618,7 +51618,7 @@ pub fn vreinterpret_u64_p8(a: poly8x8_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51640,7 +51640,7 @@ pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51666,7 +51666,7 @@ pub fn vreinterpret_p16_p8(a: poly8x8_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51688,7 +51688,7 @@ pub fn vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51715,7 +51715,7 @@ pub fn 
vreinterpretq_f32_p8(a: poly8x16_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51737,7 +51737,7 @@ pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51768,7 +51768,7 @@ pub fn vreinterpretq_s8_p8(a: poly8x16_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51790,7 +51790,7 @@ pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51817,7 +51817,7 @@ pub fn vreinterpretq_s16_p8(a: poly8x16_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] @@ -51839,7 +51839,7 @@ pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51866,7 +51866,7 @@ pub fn vreinterpretq_s32_p8(a: poly8x16_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51888,7 +51888,7 @@ pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51915,7 +51915,7 @@ pub fn vreinterpretq_s64_p8(a: poly8x16_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51937,7 +51937,7 @@ pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51968,7 +51968,7 @@ pub fn vreinterpretq_u8_p8(a: poly8x16_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -51990,7 +51990,7 @@ pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52017,7 +52017,7 @@ pub fn vreinterpretq_u16_p8(a: poly8x16_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52039,7 +52039,7 @@ pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52066,7 +52066,7 @@ pub fn vreinterpretq_u32_p8(a: poly8x16_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52088,7 +52088,7 @@ pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52115,7 +52115,7 @@ pub fn vreinterpretq_u64_p8(a: poly8x16_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52137,7 +52137,7 @@ pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52164,7 +52164,7 @@ pub fn vreinterpretq_p16_p8(a: poly8x16_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52186,7 +52186,7 @@ pub fn vreinterpret_f32_p16(a: poly16x4_t) -> 
float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_f32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52212,7 +52212,7 @@ pub fn vreinterpret_f32_p16(a: poly16x4_t) -> float32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52234,7 +52234,7 @@ pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52260,7 +52260,7 @@ pub fn vreinterpret_s8_p16(a: poly16x4_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52282,7 +52282,7 @@ pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-52308,7 +52308,7 @@ pub fn vreinterpret_s16_p16(a: poly16x4_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52330,7 +52330,7 @@ pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52356,7 +52356,7 @@ pub fn vreinterpret_s32_p16(a: poly16x4_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52378,7 +52378,7 @@ pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52403,7 +52403,7 @@ pub fn vreinterpret_s64_p16(a: poly16x4_t) -> int64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52425,7 +52425,7 @@ pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52451,7 +52451,7 @@ pub fn vreinterpret_u8_p16(a: poly16x4_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52473,7 +52473,7 @@ pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52499,7 +52499,7 @@ pub fn vreinterpret_u16_p16(a: poly16x4_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52521,7 +52521,7 @@ pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p16)"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52547,7 +52547,7 @@ pub fn vreinterpret_u32_p16(a: poly16x4_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52569,7 +52569,7 @@ pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52594,7 +52594,7 @@ pub fn vreinterpret_u64_p16(a: poly16x4_t) -> uint64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52616,7 +52616,7 @@ pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52642,7 +52642,7 @@ pub fn vreinterpret_p8_p16(a: poly16x4_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52664,7 +52664,7 @@ pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_f32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52690,7 +52690,7 @@ pub fn vreinterpretq_f32_p16(a: poly16x8_t) -> float32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52712,7 +52712,7 @@ pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52742,7 +52742,7 @@ pub fn vreinterpretq_s8_p16(a: poly16x8_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52764,7 +52764,7 @@ pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> 
int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52790,7 +52790,7 @@ pub fn vreinterpretq_s16_p16(a: poly16x8_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52812,7 +52812,7 @@ pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52838,7 +52838,7 @@ pub fn vreinterpretq_s32_p16(a: poly16x8_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52860,7 +52860,7 @@ pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
@@ -52886,7 +52886,7 @@ pub fn vreinterpretq_s64_p16(a: poly16x8_t) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52908,7 +52908,7 @@ pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52938,7 +52938,7 @@ pub fn vreinterpretq_u8_p16(a: poly16x8_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52960,7 +52960,7 @@ pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -52986,7 +52986,7 @@ pub fn vreinterpretq_u16_p16(a: poly16x8_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable 
= "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -53008,7 +53008,7 @@ pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -53034,7 +53034,7 @@ pub fn vreinterpretq_u32_p16(a: poly16x8_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -53056,7 +53056,7 @@ pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -53082,7 +53082,7 @@ pub fn vreinterpretq_u64_p16(a: poly16x8_t) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -53104,7 +53104,7 @@ pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p16)"] 
-#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -53134,7 +53134,7 @@ pub fn vreinterpretq_p8_p16(a: poly16x8_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53156,7 +53156,7 @@ pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53185,7 +53185,7 @@ pub fn vreinterpretq_s8_p128(a: p128) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53207,7 +53207,7 @@ pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53232,7 +53232,7 @@ pub fn vreinterpretq_s16_p128(a: p128) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53254,7 +53254,7 @@ pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53279,7 +53279,7 @@ pub fn vreinterpretq_s32_p128(a: p128) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53301,7 +53301,7 @@ pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s64_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53326,7 +53326,7 @@ pub fn vreinterpretq_s64_p128(a: p128) -> int64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53348,7 +53348,7 @@ pub fn vreinterpretq_u8_p128(a: p128) -> 
uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53377,7 +53377,7 @@ pub fn vreinterpretq_u8_p128(a: p128) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53399,7 +53399,7 @@ pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53424,7 +53424,7 @@ pub fn vreinterpretq_u16_p128(a: p128) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53446,7 +53446,7 @@ pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v8"))] @@ -53471,7 +53471,7 @@ pub fn vreinterpretq_u32_p128(a: p128) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53493,7 +53493,7 @@ pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u64_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53518,7 +53518,7 @@ pub fn vreinterpretq_u64_p128(a: p128) -> uint64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53540,7 +53540,7 @@ pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53569,7 +53569,7 @@ pub fn vreinterpretq_p8_p128(a: p128) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] 
#[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53591,7 +53591,7 @@ pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53616,7 +53616,7 @@ pub fn vreinterpretq_p16_p128(a: p128) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53638,7 +53638,7 @@ pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p128)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53663,7 +53663,7 @@ pub fn vreinterpretq_p64_p128(a: p128) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53685,7 +53685,7 @@ pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53710,7 +53710,7 @@ pub fn vreinterpret_p64_s8(a: int8x8_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53732,7 +53732,7 @@ pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53758,7 +53758,7 @@ pub fn vreinterpretq_p128_s8(a: int8x16_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53780,7 +53780,7 @@ pub fn vreinterpretq_p64_s8(a: int8x16_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53807,7 +53807,7 @@ pub fn vreinterpretq_p64_s8(a: int8x16_t) -> 
poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53829,7 +53829,7 @@ pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53854,7 +53854,7 @@ pub fn vreinterpret_p64_s16(a: int16x4_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53876,7 +53876,7 @@ pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53901,7 +53901,7 @@ pub fn vreinterpretq_p128_s16(a: int16x8_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] @@ -53923,7 +53923,7 @@ pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53949,7 +53949,7 @@ pub fn vreinterpretq_p64_s16(a: int16x8_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53971,7 +53971,7 @@ pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -53996,7 +53996,7 @@ pub fn vreinterpret_p64_s32(a: int32x2_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54018,7 +54018,7 @@ pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s32)"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54043,7 +54043,7 @@ pub fn vreinterpretq_p128_s32(a: int32x4_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54065,7 +54065,7 @@ pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54091,7 +54091,7 @@ pub fn vreinterpretq_p64_s32(a: int32x4_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54113,7 +54113,7 @@ pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_s64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54138,7 +54138,7 @@ pub fn vreinterpretq_p128_s64(a: int64x2_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54160,7 +54160,7 @@ pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54185,7 +54185,7 @@ pub fn vreinterpret_p64_u8(a: uint8x8_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54207,7 +54207,7 @@ pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54233,7 +54233,7 @@ pub fn vreinterpretq_p128_u8(a: uint8x16_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54255,7 +54255,7 @@ pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> 
poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54282,7 +54282,7 @@ pub fn vreinterpretq_p64_u8(a: uint8x16_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54304,7 +54304,7 @@ pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54329,7 +54329,7 @@ pub fn vreinterpret_p64_u16(a: uint16x4_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54351,7 +54351,7 @@ pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] @@ -54376,7 +54376,7 @@ pub fn vreinterpretq_p128_u16(a: uint16x8_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54398,7 +54398,7 @@ pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54424,7 +54424,7 @@ pub fn vreinterpretq_p64_u16(a: uint16x8_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54446,7 +54446,7 @@ pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54471,7 +54471,7 @@ pub fn vreinterpret_p64_u32(a: uint32x2_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54493,7 +54493,7 @@ pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54518,7 +54518,7 @@ pub fn vreinterpretq_p128_u32(a: uint32x4_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54540,7 +54540,7 @@ pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54566,7 +54566,7 @@ pub fn vreinterpretq_p64_u32(a: uint32x4_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54588,7 +54588,7 @@ pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54613,7 +54613,7 @@ pub fn vreinterpretq_p128_u64(a: uint64x2_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54635,7 +54635,7 @@ pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54660,7 +54660,7 @@ pub fn vreinterpret_p64_p8(a: poly8x8_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54682,7 +54682,7 @@ pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54708,7 +54708,7 @@ pub fn vreinterpretq_p128_p8(a: poly8x16_t) -> 
p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54730,7 +54730,7 @@ pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54757,7 +54757,7 @@ pub fn vreinterpretq_p64_p8(a: poly8x16_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54779,7 +54779,7 @@ pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54804,7 +54804,7 @@ pub fn vreinterpret_p64_p16(a: poly16x4_t) -> poly64x1_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] @@ -54826,7 +54826,7 @@ pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54851,7 +54851,7 @@ pub fn vreinterpretq_p128_p16(a: poly16x8_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54873,7 +54873,7 @@ pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p64_p16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54899,7 +54899,7 @@ pub fn vreinterpretq_p64_p16(a: poly16x8_t) -> poly64x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54921,7 +54921,7 @@ pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = 
"big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54946,7 +54946,7 @@ pub fn vreinterpret_s8_p64(a: poly64x1_t) -> int8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54968,7 +54968,7 @@ pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -54993,7 +54993,7 @@ pub fn vreinterpret_s16_p64(a: poly64x1_t) -> int16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55015,7 +55015,7 @@ pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_s32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55040,7 +55040,7 @@ pub fn vreinterpret_s32_p64(a: poly64x1_t) -> int32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55062,7 +55062,7 @@ pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55087,7 +55087,7 @@ pub fn vreinterpret_u8_p64(a: poly64x1_t) -> uint8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55109,7 +55109,7 @@ pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55134,7 +55134,7 @@ pub fn vreinterpret_u16_p64(a: poly64x1_t) -> uint16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55156,7 +55156,7 @@ pub fn vreinterpret_u32_p64(a: 
poly64x1_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_u32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55181,7 +55181,7 @@ pub fn vreinterpret_u32_p64(a: poly64x1_t) -> uint32x2_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55203,7 +55203,7 @@ pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55228,7 +55228,7 @@ pub fn vreinterpret_p8_p64(a: poly64x1_t) -> poly8x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55250,7 +55250,7 @@ pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpret_p16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v8"))] @@ -55275,7 +55275,7 @@ pub fn vreinterpret_p16_p64(a: poly64x1_t) -> poly16x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55297,7 +55297,7 @@ pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p128_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55322,7 +55322,7 @@ pub fn vreinterpretq_p128_p64(a: poly64x2_t) -> p128 { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55344,7 +55344,7 @@ pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55374,7 +55374,7 @@ pub fn vreinterpretq_s8_p64(a: poly64x2_t) -> int8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian 
= "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55396,7 +55396,7 @@ pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55422,7 +55422,7 @@ pub fn vreinterpretq_s16_p64(a: poly64x2_t) -> int16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55444,7 +55444,7 @@ pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_s32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55470,7 +55470,7 @@ pub fn vreinterpretq_s32_p64(a: poly64x2_t) -> int32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55492,7 +55492,7 @@ pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55522,7 +55522,7 @@ pub fn vreinterpretq_u8_p64(a: poly64x2_t) -> uint8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55544,7 +55544,7 @@ pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55570,7 +55570,7 @@ pub fn vreinterpretq_u16_p64(a: poly64x2_t) -> uint16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55592,7 +55592,7 @@ pub fn vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_u32_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55618,7 +55618,7 @@ pub fn 
vreinterpretq_u32_p64(a: poly64x2_t) -> uint32x4_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55640,7 +55640,7 @@ pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p8_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55670,7 +55670,7 @@ pub fn vreinterpretq_p8_p64(a: poly64x2_t) -> poly8x16_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55692,7 +55692,7 @@ pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { } #[doc = "Vector reinterpret cast operation"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vreinterpretq_p16_p64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -55718,7 +55718,7 @@ pub fn vreinterpretq_p16_p64(a: poly64x2_t) -> poly16x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] @@ -55739,7 +55739,7 @@ pub fn vrev16_p8(a: poly8x8_t) -> poly8x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] @@ -55760,7 +55760,7 @@ pub fn vrev16_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] @@ -55781,7 +55781,7 @@ pub fn vrev16_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] @@ -55802,7 +55802,7 @@ pub fn vrev16q_p8(a: poly8x16_t) -> poly8x16_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] @@ -55823,7 +55823,7 @@ pub fn vrev16q_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = 
"[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev16q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev16.8"))] @@ -55844,7 +55844,7 @@ pub fn vrev16q_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] @@ -55865,7 +55865,7 @@ pub fn vrev32_p16(a: poly16x4_t) -> poly16x4_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] @@ -55886,7 +55886,7 @@ pub fn vrev32_p8(a: poly8x8_t) -> poly8x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] @@ -55907,7 +55907,7 @@ pub fn vrev32_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] @@ -55928,7 +55928,7 @@ pub fn vrev32_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] @@ -55949,7 +55949,7 @@ pub fn vrev32_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] @@ -55970,7 +55970,7 @@ pub fn vrev32_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] @@ -55991,7 +55991,7 @@ pub fn vrev32q_p16(a: poly16x8_t) -> poly16x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] @@ -56012,7 +56012,7 @@ pub fn vrev32q_p8(a: poly8x16_t) -> poly8x16_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] @@ -56033,7 +56033,7 @@ pub fn vrev32q_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] @@ -56054,7 +56054,7 @@ pub fn vrev32q_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.16"))] @@ -56075,7 +56075,7 @@ pub fn vrev32q_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev32q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev32.8"))] @@ -56096,7 +56096,7 @@ pub fn vrev32q_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] @@ -56117,7 +56117,7 @@ pub fn vrev64_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] @@ -56138,7 +56138,7 @@ pub fn vrev64_p16(a: poly16x4_t) -> poly16x4_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] @@ -56159,7 +56159,7 @@ pub fn vrev64_p8(a: poly8x8_t) -> poly8x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] @@ -56180,7 +56180,7 @@ pub fn vrev64_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] @@ -56201,7 +56201,7 @@ pub fn vrev64_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] @@ -56222,7 +56222,7 @@ pub fn vrev64_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] @@ -56243,7 +56243,7 @@ pub fn vrev64_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] @@ -56264,7 +56264,7 @@ pub fn vrev64_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] @@ -56285,7 +56285,7 @@ pub fn vrev64_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] @@ -56306,7 +56306,7 @@ pub fn vrev64q_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] @@ -56327,7 +56327,7 @@ pub fn vrev64q_p16(a: poly16x8_t) -> poly16x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] @@ -56348,7 +56348,7 @@ pub fn vrev64q_p8(a: poly8x16_t) -> poly8x16_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] @@ -56369,7 +56369,7 @@ pub fn vrev64q_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] @@ -56390,7 +56390,7 @@ pub fn vrev64q_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] @@ -56411,7 +56411,7 @@ pub fn vrev64q_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.16"))] @@ -56432,7 +56432,7 @@ pub fn vrev64q_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.32"))] @@ -56453,7 +56453,7 @@ pub fn vrev64q_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Reversing vector elements (swap endianness)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrev64.8"))] @@ -56474,7 +56474,7 @@ pub fn vrev64q_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Reverse elements in 64-bit doublewords"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vrev64))] #[cfg_attr( @@ -56496,7 +56496,7 @@ pub fn vrev64_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Reverse elements in 64-bit doublewords"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrev64q_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrev64))] #[cfg_attr( @@ -56518,7 +56518,7 @@ pub fn vrev64q_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] @@ -56547,7 +56547,7 @@ pub fn vrhadd_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s8"))] @@ -56576,7 +56576,7 @@ pub fn vrhaddq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] @@ -56605,7 +56605,7 @@ pub fn vrhadd_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s16)"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s16"))] @@ -56634,7 +56634,7 @@ pub fn vrhaddq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] @@ -56663,7 +56663,7 @@ pub fn vrhadd_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.s32"))] @@ -56692,7 +56692,7 @@ pub fn vrhaddq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] @@ -56721,7 +56721,7 @@ pub fn vrhadd_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u8"))] @@ -56750,7 +56750,7 @@ pub fn vrhaddq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t 
{ } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] @@ -56779,7 +56779,7 @@ pub fn vrhadd_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u16"))] @@ -56808,7 +56808,7 @@ pub fn vrhaddq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhadd_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] @@ -56837,7 +56837,7 @@ pub fn vrhadd_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Rounding halving add"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrhaddq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vrhadd.u32"))] @@ -56866,7 +56866,7 @@ pub fn vrhaddq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr( @@ -56895,7 +56895,7 @@ pub fn vrndn_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] #[cfg_attr( @@ -56924,7 +56924,7 @@ pub fn vrndnq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndn_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] @@ -56952,7 +56952,7 @@ pub fn vrndn_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Floating-point round to integral, to nearest with ties to even"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrndnq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp-armv8,v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrintn))] @@ -56980,7 +56980,7 @@ pub fn vrndnq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57009,7 +57009,7 @@ pub fn vrshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } 
#[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57038,7 +57038,7 @@ pub fn vrshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57067,7 +57067,7 @@ pub fn vrshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57096,7 +57096,7 @@ pub fn vrshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57125,7 +57125,7 @@ pub fn vrshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57154,7 +57154,7 @@ pub fn vrshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57183,7 +57183,7 @@ pub fn vrshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Signed rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57212,7 +57212,7 @@ pub fn vrshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57241,7 +57241,7 @@ pub fn vrshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57270,7 +57270,7 @@ pub fn vrshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57299,7 +57299,7 @@ pub fn vrshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57328,7 +57328,7 @@ pub fn vrshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57357,7 +57357,7 @@ pub fn vrshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57386,7 +57386,7 @@ pub fn vrshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshl_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vrshl))] @@ -57415,7 +57415,7 @@ pub fn vrshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Unsigned rounding shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshlq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshl))] @@ -57444,7 +57444,7 @@ pub fn vrshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57467,7 +57467,7 @@ pub fn vrshr_n_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57490,7 +57490,7 @@ pub fn vrshrq_n_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57513,7 +57513,7 @@ pub fn vrshr_n_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s16)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57536,7 +57536,7 @@ pub fn vrshrq_n_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57559,7 +57559,7 @@ pub fn vrshr_n_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57582,7 +57582,7 @@ pub fn vrshrq_n_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57605,7 +57605,7 @@ pub fn vrshr_n_s64(a: int64x1_t) -> int64x1_t { } #[doc = "Signed rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57628,7 +57628,7 @@ pub fn vrshrq_n_s64(a: int64x2_t) -> int64x2_t { } #[doc = 
"Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57651,7 +57651,7 @@ pub fn vrshr_n_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57674,7 +57674,7 @@ pub fn vrshrq_n_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57697,7 +57697,7 @@ pub fn vrshr_n_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57720,7 +57720,7 @@ pub fn vrshrq_n_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57743,7 +57743,7 @@ pub fn vrshr_n_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57766,7 +57766,7 @@ pub fn vrshrq_n_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshr_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57789,7 +57789,7 @@ pub fn vrshr_n_u64(a: uint64x1_t) -> uint64x1_t { } #[doc = "Unsigned rounding shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshr, N = 2))] @@ -57812,7 +57812,7 @@ pub fn vrshrq_n_u64(a: uint64x2_t) -> uint64x2_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vrshrn, N = 2))] @@ -57828,7 +57828,7 @@ pub fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] -#[inline(always)] 
+#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vrshrn, N = 2))] @@ -57844,7 +57844,7 @@ pub fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vrshrn, N = 2))] @@ -57860,7 +57860,7 @@ pub fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(rshrn, N = 2))] @@ -57879,7 +57879,7 @@ pub fn vrshrn_n_s16(a: int16x8_t) -> int8x8_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(rshrn, N = 2))] @@ -57898,7 +57898,7 @@ pub fn vrshrn_n_s32(a: int32x4_t) -> int16x4_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(rshrn, N = 2))] @@ -57917,7 +57917,7 @@ pub fn vrshrn_n_s64(a: int64x2_t) -> int32x2_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vrshrn, N = 2))] @@ -57940,7 +57940,7 @@ pub fn vrshrn_n_u16(a: uint16x8_t) -> uint8x8_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] @@ -57963,7 +57963,7 @@ pub fn vrshrn_n_u32(a: uint32x4_t) -> uint16x4_t { } #[doc = "Rounding shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrshrn_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrshrn, N = 2))] @@ -57986,7 +57986,7 @@ pub fn vrshrn_n_u64(a: uint64x2_t) -> uint32x2_t { } #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] @@ -58016,7 +58016,7 @@ pub fn vrsqrte_f16(a: float16x4_t) -> float16x4_t { } #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] @@ -58046,7 +58046,7 @@ pub fn vrsqrteq_f16(a: float16x8_t) -> float16x8_t { } #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] @@ -58075,7 +58075,7 @@ pub fn vrsqrte_f32(a: float32x2_t) -> float32x2_t { } #[doc = "Reciprocal square-root estimate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] @@ -58104,7 +58104,7 @@ pub fn vrsqrteq_f32(a: float32x4_t) -> float32x4_t { } #[doc = "Unsigned reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrte_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] @@ -58133,7 +58133,7 @@ pub fn vrsqrte_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Unsigned reciprocal square root estimate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrteq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrte))] @@ -58162,7 +58162,7 @@ pub fn vrsqrteq_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,fp16")] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] @@ -58192,7 +58192,7 @@ pub fn vrsqrts_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,fp16")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] @@ -58222,7 +58222,7 @@ pub fn vrsqrtsq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrts_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] @@ -58251,7 +58251,7 @@ pub fn vrsqrts_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Floating-point reciprocal square root step"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsqrtsq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsqrts))] @@ -58280,7 +58280,7 @@ pub fn vrsqrtsq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58303,7 +58303,7 @@ pub fn vrsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } 
#[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58326,7 +58326,7 @@ pub fn vrsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58349,7 +58349,7 @@ pub fn vrsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58372,7 +58372,7 @@ pub fn vrsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58395,7 +58395,7 @@ pub fn vrsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s32)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58418,7 +58418,7 @@ pub fn vrsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58441,7 +58441,7 @@ pub fn vrsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Signed rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58464,7 +58464,7 @@ pub fn vrsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58487,7 +58487,7 @@ pub fn vrsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(vrsra, N = 2))] @@ -58510,7 +58510,7 @@ pub fn vrsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58533,7 +58533,7 @@ pub fn vrsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58556,7 +58556,7 @@ pub fn vrsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58579,7 +58579,7 @@ pub fn vrsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58602,7 +58602,7 @@ pub fn vrsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Unsigned 
rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsra_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58625,7 +58625,7 @@ pub fn vrsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Unsigned rounding shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsraq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsra, N = 2))] @@ -58648,7 +58648,7 @@ pub fn vrsraq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] @@ -58677,7 +58677,7 @@ pub fn vrsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] @@ -58706,7 +58706,7 @@ pub fn vrsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_s64)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vrsubhn))] @@ -58735,7 +58735,7 @@ pub fn vrsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -58757,7 +58757,7 @@ pub fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u16)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -58784,7 +58784,7 @@ pub fn vrsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -58806,7 +58806,7 @@ pub fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -58833,7 +58833,7 @@ pub fn vrsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -58855,7 +58855,7 @@ pub fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { } #[doc = "Rounding subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vrsubhn_u64)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -58882,7 +58882,7 @@ pub fn vrsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( @@ -58899,7 +58899,7 @@ pub fn vset_lane_f16(a: f16, b: float16x4_t) -> float16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] #[cfg_attr( @@ -58916,7 +58916,7 @@ pub fn vsetq_lane_f16(a: f16, b: float16x8_t) -> float16x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop, LANE = 0))] @@ -58939,7 +58939,7 @@ pub fn vset_lane_f32(a: f32, b: float32x2_t) -> float32x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -58962,7 +58962,7 @@ pub fn vsetq_lane_f32(a: f32, b: float32x4_t) -> float32x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -58985,7 +58985,7 @@ pub fn vset_lane_s8(a: i8, b: int8x8_t) -> int8x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59008,7 +59008,7 @@ pub fn vsetq_lane_s8(a: i8, b: int8x16_t) -> int8x16_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59031,7 +59031,7 @@ pub fn vset_lane_s16(a: i16, b: int16x4_t) -> int16x4_t { } #[doc = "Insert 
vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59054,7 +59054,7 @@ pub fn vsetq_lane_s16(a: i16, b: int16x8_t) -> int16x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59077,7 +59077,7 @@ pub fn vset_lane_s32(a: i32, b: int32x2_t) -> int32x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59100,7 +59100,7 @@ pub fn vsetq_lane_s32(a: i32, b: int32x4_t) -> int32x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59123,7 +59123,7 @@ pub fn vsetq_lane_s64(a: i64, b: int64x2_t) -> int64x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59146,7 +59146,7 @@ pub fn vset_lane_u8(a: u8, b: uint8x8_t) -> uint8x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59169,7 +59169,7 @@ pub fn vsetq_lane_u8(a: u8, b: uint8x16_t) -> uint8x16_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59192,7 +59192,7 @@ pub fn vset_lane_u16(a: u16, b: uint16x4_t) -> uint16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59215,7 +59215,7 @@ pub fn vsetq_lane_u16(a: u16, b: uint16x8_t) -> uint16x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59238,7 +59238,7 @@ pub fn vset_lane_u32(a: u32, b: uint32x2_t) -> uint32x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59261,7 +59261,7 @@ pub fn vsetq_lane_u32(a: u32, b: uint32x4_t) -> uint32x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59284,7 +59284,7 @@ pub fn vsetq_lane_u64(a: u64, b: uint64x2_t) -> uint64x2_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59307,7 +59307,7 @@ pub fn vset_lane_p8(a: p8, b: poly8x8_t) -> poly8x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop, LANE = 0))] @@ -59330,7 +59330,7 @@ pub fn vsetq_lane_p8(a: p8, b: poly8x16_t) -> poly8x16_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59353,7 +59353,7 @@ pub fn vset_lane_p16(a: p16, b: poly16x4_t) -> poly16x4_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59376,7 +59376,7 @@ pub fn vsetq_lane_p16(a: p16, b: poly16x8_t) -> poly16x8_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59399,7 +59399,7 @@ pub fn vset_lane_p64(a: p64, b: poly64x1_t) -> poly64x1_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59422,7 +59422,7 @@ pub fn vset_lane_s64(a: i64, b: int64x1_t) -> int64x1_t { } #[doc = 
"Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vset_lane_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59445,7 +59445,7 @@ pub fn vset_lane_u64(a: u64, b: uint64x1_t) -> uint64x1_t { } #[doc = "Insert vector element from another vector element"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsetq_lane_p64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -59468,7 +59468,7 @@ pub fn vsetq_lane_p64(a: p64, b: poly64x2_t) -> poly64x2_t { } #[doc = "SHA1 hash update accelerator, choose."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1cq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1c))] @@ -59493,7 +59493,7 @@ pub fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32 } #[doc = "SHA1 fixed rotate."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1h_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1h))] @@ -59518,7 +59518,7 @@ pub fn vsha1h_u32(hash_e: u32) -> u32 { } #[doc = "SHA1 hash update accelerator, majority"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1mq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1m))] @@ -59543,7 +59543,7 @@ pub fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32 } #[doc = "SHA1 hash update accelerator, parity"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1pq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1p))] @@ -59568,7 +59568,7 @@ pub fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32 } #[doc = "SHA1 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su0q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1su0))] @@ -59593,7 +59593,7 @@ pub fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> u } #[doc = "SHA1 schedule update accelerator, second part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha1su1q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha1su1))] @@ -59618,7 +59618,7 @@ pub fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t { } #[doc = "SHA1 schedule update accelerator, upper part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256h2q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha256h2))] @@ -59647,7 +59647,7 @@ pub fn vsha256h2q_u32(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4 } #[doc = "SHA1 schedule update 
accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256hq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha256h))] @@ -59676,7 +59676,7 @@ pub fn vsha256hq_u32(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_ } #[doc = "SHA256 schedule update accelerator, first part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su0q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha256su0))] @@ -59701,7 +59701,7 @@ pub fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t { } #[doc = "SHA256 schedule update accelerator, second part."] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsha256su1q_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "sha2")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(test, assert_instr(sha256su1))] @@ -59725,7 +59725,7 @@ pub fn vsha256su1q_u32(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) } unsafe { _vsha256su1q_u32(tw0_3, w8_11, w12_15) } } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59738,7 +59738,7 @@ fn vshiftlins_v16i8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } unsafe { _vshiftlins_v16i8(a, b, const { int8x16_t([N as i8; 16]) }) } } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59751,7 +59751,7 @@ fn vshiftlins_v1i64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } unsafe { _vshiftlins_v1i64(a, b, const { int64x1_t([N 
as i64; 1]) }) } } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59764,7 +59764,7 @@ fn vshiftlins_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } unsafe { _vshiftlins_v2i32(a, b, const { int32x2_t([N; 2]) }) } } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59777,7 +59777,7 @@ fn vshiftlins_v2i64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } unsafe { _vshiftlins_v2i64(a, b, const { int64x2_t([N as i64; 2]) }) } } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59790,7 +59790,7 @@ fn vshiftlins_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } unsafe { _vshiftlins_v4i16(a, b, const { int16x4_t([N as i16; 4]) }) } } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59803,7 +59803,7 @@ fn vshiftlins_v4i32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } unsafe { _vshiftlins_v4i32(a, b, const { int32x4_t([N; 4]) }) } } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59816,7 +59816,7 @@ fn vshiftlins_v8i16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } unsafe { _vshiftlins_v8i16(a, b, const { int16x8_t([N as i16; 8]) }) } } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59831,7 +59831,7 @@ fn vshiftlins_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v16i8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59846,7 +59846,7 @@ fn vshiftrins_v16i8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v1i64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59861,7 +59861,7 @@ fn vshiftrins_v1i64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v2i32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59876,7 +59876,7 @@ fn vshiftrins_v2i32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v2i64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59891,7 +59891,7 @@ fn vshiftrins_v2i64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v4i16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59906,7 +59906,7 @@ fn vshiftrins_v4i16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc 
= "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v4i32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59921,7 +59921,7 @@ fn vshiftrins_v4i32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v8i16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59936,7 +59936,7 @@ fn vshiftrins_v8i16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshiftrins_v8i8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -59951,7 +59951,7 @@ fn vshiftrins_v8i8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -59974,7 +59974,7 @@ pub fn vshl_n_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -59997,7 +59997,7 @@ pub fn 
vshlq_n_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60020,7 +60020,7 @@ pub fn vshl_n_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60043,7 +60043,7 @@ pub fn vshlq_n_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60066,7 +60066,7 @@ pub fn vshl_n_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60089,7 +60089,7 @@ pub fn vshlq_n_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vshl, N = 2))] @@ -60112,7 +60112,7 @@ pub fn vshl_n_s64(a: int64x1_t) -> int64x1_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60135,7 +60135,7 @@ pub fn vshlq_n_s64(a: int64x2_t) -> int64x2_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60158,7 +60158,7 @@ pub fn vshl_n_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60181,7 +60181,7 @@ pub fn vshlq_n_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60204,7 +60204,7 @@ pub fn vshl_n_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60227,7 +60227,7 @@ pub fn vshlq_n_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60250,7 +60250,7 @@ pub fn vshl_n_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60273,7 +60273,7 @@ pub fn vshlq_n_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60296,7 +60296,7 @@ pub fn vshl_n_u64(a: uint64x1_t) -> uint64x1_t { } #[doc = "Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl, N = 2))] @@ -60319,7 +60319,7 @@ pub fn vshlq_n_u64(a: uint64x2_t) -> uint64x2_t { } #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60348,7 +60348,7 @@ pub fn vshl_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60377,7 +60377,7 @@ pub fn vshlq_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60406,7 +60406,7 @@ pub fn vshl_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60435,7 +60435,7 @@ pub fn vshlq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60464,7 +60464,7 @@ pub fn vshl_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Signed Shift left"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60493,7 +60493,7 @@ pub fn vshlq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60522,7 +60522,7 @@ pub fn vshl_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Signed Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60551,7 +60551,7 @@ pub fn vshlq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60580,7 +60580,7 @@ pub fn vshl_u8(a: uint8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60609,7 +60609,7 @@ pub fn 
vshlq_u8(a: uint8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60638,7 +60638,7 @@ pub fn vshl_u16(a: uint16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60667,7 +60667,7 @@ pub fn vshlq_u16(a: uint16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60696,7 +60696,7 @@ pub fn vshl_u32(a: uint32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60725,7 +60725,7 @@ pub fn vshlq_u32(a: uint32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshl_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60754,7 +60754,7 @@ pub fn vshl_u64(a: uint64x1_t, b: int64x1_t) -> uint64x1_t { } #[doc = "Unsigned Shift left"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshlq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vshl))] @@ -60783,7 +60783,7 @@ pub fn vshlq_u64(a: uint64x2_t, b: int64x2_t) -> uint64x2_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s16", N = 2))] @@ -60806,7 +60806,7 @@ pub fn vshll_n_s16(a: int16x4_t) -> int32x4_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s32", N = 2))] @@ -60829,7 +60829,7 @@ pub fn vshll_n_s32(a: int32x2_t) -> int64x2_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.s8", N = 2))] @@ -60852,7 +60852,7 @@ pub fn vshll_n_s8(a: int8x8_t) -> int16x8_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u16", N = 2))] @@ -60875,7 +60875,7 @@ pub fn vshll_n_u16(a: uint16x4_t) -> uint32x4_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u32", N = 2))] @@ -60898,7 +60898,7 @@ pub fn vshll_n_u32(a: uint32x2_t) -> uint64x2_t { } #[doc = "Signed shift left long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshll_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshll.u8", N = 2))] @@ -60921,7 +60921,7 @@ pub fn vshll_n_u8(a: uint8x8_t) -> uint16x8_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] @@ -60945,7 +60945,7 @@ pub fn vshr_n_s8(a: int8x8_t) -> int8x8_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s8", N = 2))] @@ 
-60969,7 +60969,7 @@ pub fn vshrq_n_s8(a: int8x16_t) -> int8x16_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] @@ -60993,7 +60993,7 @@ pub fn vshr_n_s16(a: int16x4_t) -> int16x4_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s16", N = 2))] @@ -61017,7 +61017,7 @@ pub fn vshrq_n_s16(a: int16x8_t) -> int16x8_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] @@ -61041,7 +61041,7 @@ pub fn vshr_n_s32(a: int32x2_t) -> int32x2_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s32", N = 2))] @@ -61065,7 +61065,7 @@ pub fn vshrq_n_s32(a: int32x4_t) -> int32x4_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] @@ -61089,7 +61089,7 @@ pub fn vshr_n_s64(a: int64x1_t) -> int64x1_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.s64", N = 2))] @@ -61113,7 +61113,7 @@ pub fn vshrq_n_s64(a: int64x2_t) -> int64x2_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] @@ -61141,7 +61141,7 @@ pub fn vshr_n_u8(a: uint8x8_t) -> uint8x8_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u8", N = 2))] @@ -61169,7 +61169,7 @@ pub fn vshrq_n_u8(a: uint8x16_t) -> uint8x16_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] @@ -61197,7 +61197,7 @@ pub fn vshr_n_u16(a: uint16x4_t) -> uint16x4_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u16)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u16", N = 2))] @@ -61225,7 +61225,7 @@ pub fn vshrq_n_u16(a: uint16x8_t) -> uint16x8_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] @@ -61253,7 +61253,7 @@ pub fn vshr_n_u32(a: uint32x2_t) -> uint32x2_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u32", N = 2))] @@ -61281,7 +61281,7 @@ pub fn vshrq_n_u32(a: uint32x4_t) -> uint32x4_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshr_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] @@ -61309,7 +61309,7 @@ pub fn vshr_n_u64(a: uint64x1_t) -> uint64x1_t { } #[doc = "Shift right"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshr.u64", N = 2))] @@ -61337,7 +61337,7 @@ pub fn vshrq_n_u64(a: uint64x2_t) -> uint64x2_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] @@ -61360,7 +61360,7 @@ pub fn vshrn_n_s16(a: int16x8_t) -> int8x8_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 2))] @@ -61383,7 +61383,7 @@ pub fn vshrn_n_s32(a: int32x4_t) -> int16x4_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] @@ -61406,7 +61406,7 @@ pub fn vshrn_n_s64(a: int64x2_t) -> int32x2_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i16", N = 2))] @@ -61429,7 +61429,7 @@ pub fn vshrn_n_u16(a: uint16x8_t) -> uint8x8_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i32", N = 
2))] @@ -61452,7 +61452,7 @@ pub fn vshrn_n_u32(a: uint32x4_t) -> uint16x4_t { } #[doc = "Shift right narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vshrn_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vshrn.i64", N = 2))] @@ -61475,7 +61475,7 @@ pub fn vshrn_n_u64(a: uint64x2_t) -> uint32x2_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61487,7 +61487,7 @@ pub fn vsli_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61499,7 +61499,7 @@ pub fn vsliq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61511,7 +61511,7 @@ pub fn vsli_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] 
#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61523,7 +61523,7 @@ pub fn vsliq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61535,7 +61535,7 @@ pub fn vsli_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61547,7 +61547,7 @@ pub fn vsliq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_s64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61559,7 +61559,7 @@ pub fn vsli_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_s64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61571,7 +61571,7 @@ pub fn vsliq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u8)"] -#[inline(always)] +#[inline] 
#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61583,7 +61583,7 @@ pub fn vsli_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61595,7 +61595,7 @@ pub fn vsliq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61607,7 +61607,7 @@ pub fn vsli_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61619,7 +61619,7 @@ pub fn vsliq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61631,7 +61631,7 @@ pub fn vsli_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61643,7 +61643,7 @@ pub fn vsliq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_u64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61655,7 +61655,7 @@ pub fn vsli_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_u64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61667,7 +61667,7 @@ pub fn vsliq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61679,7 +61679,7 @@ pub fn vsli_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61691,7 +61691,7 @@ pub fn vsliq_n_p8(a: poly8x16_t, b: poly8x16_t) -> 
poly8x16_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsli_n_p16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61703,7 +61703,7 @@ pub fn vsli_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Shift Left and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsliq_n_p16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -61715,7 +61715,7 @@ pub fn vsliq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { } #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61738,7 +61738,7 @@ pub fn vsra_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61761,7 +61761,7 @@ pub fn vsraq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61784,7 +61784,7 @@ pub fn vsra_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61807,7 +61807,7 @@ pub fn vsraq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61830,7 +61830,7 @@ pub fn vsra_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61853,7 +61853,7 @@ pub fn vsraq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Signed shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61876,7 +61876,7 @@ pub fn vsra_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Signed shift 
right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61899,7 +61899,7 @@ pub fn vsraq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61922,7 +61922,7 @@ pub fn vsra_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61945,7 +61945,7 @@ pub fn vsraq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61968,7 +61968,7 @@ pub fn vsra_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -61991,7 +61991,7 @@ pub fn vsraq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -62014,7 +62014,7 @@ pub fn vsra_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -62037,7 +62037,7 @@ pub fn vsraq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsra_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -62060,7 +62060,7 @@ pub fn vsra_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Unsigned shift right and accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsraq_n_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsra, N = 2))] @@ -62083,7 +62083,7 @@ pub fn vsraq_n_u64(a: uint64x2_t, b: 
uint64x2_t) -> uint64x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62095,7 +62095,7 @@ pub fn vsri_n_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62107,7 +62107,7 @@ pub fn vsriq_n_s8(a: int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62119,7 +62119,7 @@ pub fn vsri_n_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62131,7 +62131,7 @@ pub fn vsriq_n_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800")] @@ -62143,7 +62143,7 @@ pub fn vsri_n_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62155,7 +62155,7 @@ pub fn vsriq_n_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62167,7 +62167,7 @@ pub fn vsri_n_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62179,7 +62179,7 @@ pub fn vsriq_n_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62191,7 +62191,7 @@ pub fn vsri_n_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = 
"neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62203,7 +62203,7 @@ pub fn vsriq_n_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62215,7 +62215,7 @@ pub fn vsri_n_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62227,7 +62227,7 @@ pub fn vsriq_n_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62239,7 +62239,7 @@ pub fn vsri_n_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u32)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62251,7 +62251,7 @@ pub fn vsriq_n_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_u64)"] 
-#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62263,7 +62263,7 @@ pub fn vsri_n_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_u64)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62275,7 +62275,7 @@ pub fn vsriq_n_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62287,7 +62287,7 @@ pub fn vsri_n_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p8)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62299,7 +62299,7 @@ pub fn vsriq_n_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsri_n_p16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62311,7 +62311,7 @@ pub fn vsri_n_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4_t { } #[doc = "Shift Right and Insert (immediate)"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsriq_n_p16)"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -62325,7 +62325,7 @@ pub fn vsriq_n_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8_t { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] @@ -62343,7 +62343,7 @@ pub unsafe fn vst1_f16(ptr: *mut f16, a: float16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] @@ -62361,7 +62361,7 @@ pub unsafe fn vst1q_f16(ptr: *mut f16, a: float16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(vst1))] @@ -62379,7 +62379,7 @@ pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(vst1))] @@ -62397,7 +62397,7 @@ pub unsafe fn vst1q_f16_x2(a: *mut f16, b: 
float16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] @@ -62417,7 +62417,7 @@ pub unsafe fn vst1_f16_x2(a: *mut f16, b: float16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] @@ -62437,7 +62437,7 @@ pub unsafe fn vst1q_f16_x2(a: *mut f16, b: float16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(vst1))] @@ -62455,7 +62455,7 @@ pub unsafe fn vst1_f16_x3(a: *mut f16, b: float16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(test, assert_instr(vst1))] @@ -62473,7 +62473,7 @@ pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] @@ -62493,7 +62493,7 @@ pub unsafe fn 
vst1_f16_x3(a: *mut f16, b: float16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] @@ -62513,7 +62513,7 @@ pub unsafe fn vst1q_f16_x3(a: *mut f16, b: float16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62538,7 +62538,7 @@ pub unsafe fn vst1_f16_x4(a: *mut f16, b: float16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62563,7 +62563,7 @@ pub unsafe fn vst1q_f16_x4(a: *mut f16, b: float16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] @@ -62589,7 +62589,7 @@ pub unsafe fn vst1_f16_x4(a: *mut f16, b: float16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] #[target_feature(enable = "neon,fp16")] @@ -62615,7 +62615,7 @@ pub 
unsafe fn vst1q_f16_x4(a: *mut f16, b: float16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62629,7 +62629,7 @@ pub unsafe fn vst1_f32(ptr: *mut f32, a: float32x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62643,7 +62643,7 @@ pub unsafe fn vst1q_f32(ptr: *mut f32, a: float32x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62657,7 +62657,7 @@ pub unsafe fn vst1_s8(ptr: *mut i8, a: int8x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62671,7 +62671,7 @@ pub unsafe fn vst1q_s8(ptr: *mut i8, a: int8x16_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ 
-62685,7 +62685,7 @@ pub unsafe fn vst1_s16(ptr: *mut i16, a: int16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62699,7 +62699,7 @@ pub unsafe fn vst1q_s16(ptr: *mut i16, a: int16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62713,7 +62713,7 @@ pub unsafe fn vst1_s32(ptr: *mut i32, a: int32x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62727,7 +62727,7 @@ pub unsafe fn vst1q_s32(ptr: *mut i32, a: int32x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62741,7 +62741,7 @@ pub unsafe fn vst1_s64(ptr: *mut i64, a: int64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] @@ -62755,7 +62755,7 @@ pub unsafe fn vst1q_s64(ptr: *mut i64, a: int64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62769,7 +62769,7 @@ pub unsafe fn vst1_u8(ptr: *mut u8, a: uint8x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62783,7 +62783,7 @@ pub unsafe fn vst1q_u8(ptr: *mut u8, a: uint8x16_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62797,7 +62797,7 @@ pub unsafe fn vst1_u16(ptr: *mut u16, a: uint16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62811,7 +62811,7 @@ pub unsafe fn vst1q_u16(ptr: *mut u16, a: uint16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62825,7 +62825,7 @@ pub unsafe fn vst1_u32(ptr: *mut u32, a: uint32x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62839,7 +62839,7 @@ pub unsafe fn vst1q_u32(ptr: *mut u32, a: uint32x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62853,7 +62853,7 @@ pub unsafe fn vst1_u64(ptr: *mut u64, a: uint64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62867,7 +62867,7 @@ pub unsafe fn vst1q_u64(ptr: *mut u64, a: uint64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62881,7 +62881,7 @@ pub unsafe fn vst1_p8(ptr: *mut p8, a: poly8x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62895,7 +62895,7 @@ pub unsafe fn vst1q_p8(ptr: *mut p8, a: poly8x16_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62909,7 +62909,7 @@ pub unsafe fn vst1_p16(ptr: *mut p16, a: poly16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62923,7 +62923,7 @@ pub unsafe fn vst1q_p16(ptr: *mut p16, a: poly16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62937,7 +62937,7 @@ pub unsafe fn vst1_p64(ptr: *mut p64, a: poly64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -62951,7 +62951,7 @@ pub unsafe fn vst1q_p64(ptr: *mut p64, a: poly64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] @@ -62967,7 +62967,7 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst1))] @@ -62983,7 +62983,7 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] @@ -63002,7 +63002,7 @@ pub unsafe fn vst1_f32_x2(a: *mut f32, b: float32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] @@ -63021,7 +63021,7 @@ pub unsafe fn vst1q_f32_x2(a: *mut f32, b: float32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] @@ -63040,7 +63040,7 @@ pub unsafe fn vst1_f32_x3(a: *mut f32, b: float32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] @@ -63059,7 +63059,7 @@ pub unsafe fn vst1q_f32_x3(a: *mut f32, b: float32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -63081,7 +63081,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -63103,7 +63103,7 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] @@ -63128,7 +63128,7 @@ pub unsafe fn vst1_f32_x4(a: *mut f32, b: float32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_f32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(test, assert_instr(st1))] @@ -63153,7 +63153,7 @@ pub unsafe fn vst1q_f32_x4(a: *mut f32, b: float32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63173,7 +63173,7 @@ pub unsafe fn vst1_lane_f16(a: *mut f16, b: float16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63193,7 +63193,7 @@ pub unsafe fn vst1q_lane_f16(a: *mut f16, b: float16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63218,7 +63218,7 @@ pub unsafe fn vst1_lane_f32(a: *mut f32, b: float32x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63243,7 +63243,7 @@ pub unsafe fn vst1q_lane_f32(a: *mut f32, b: float32x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE 
= 0))] @@ -63268,7 +63268,7 @@ pub unsafe fn vst1_lane_s8(a: *mut i8, b: int8x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63293,7 +63293,7 @@ pub unsafe fn vst1q_lane_s8(a: *mut i8, b: int8x16_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63318,7 +63318,7 @@ pub unsafe fn vst1_lane_s16(a: *mut i16, b: int16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63343,7 +63343,7 @@ pub unsafe fn vst1q_lane_s16(a: *mut i16, b: int16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63368,7 +63368,7 @@ pub unsafe fn vst1_lane_s32(a: *mut i32, b: int32x2_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63393,7 +63393,7 @@ pub unsafe fn vst1q_lane_s32(a: *mut i32, b: int32x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63418,7 +63418,7 @@ pub unsafe fn vst1q_lane_s64(a: *mut i64, b: int64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63443,7 +63443,7 @@ pub unsafe fn vst1_lane_u8(a: *mut u8, b: uint8x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63468,7 +63468,7 @@ pub unsafe fn vst1q_lane_u8(a: *mut u8, b: uint8x16_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63493,7 +63493,7 @@ pub unsafe fn vst1_lane_u16(a: *mut u16, b: uint16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63518,7 +63518,7 @@ pub unsafe fn vst1q_lane_u16(a: *mut u16, b: uint16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63543,7 +63543,7 @@ pub unsafe fn vst1_lane_u32(a: *mut u32, b: uint32x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63568,7 +63568,7 @@ pub unsafe fn vst1q_lane_u32(a: *mut u32, b: uint32x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = 
"arm"), assert_instr(nop, LANE = 0))] @@ -63593,7 +63593,7 @@ pub unsafe fn vst1q_lane_u64(a: *mut u64, b: uint64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63618,7 +63618,7 @@ pub unsafe fn vst1_lane_p8(a: *mut p8, b: poly8x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63643,7 +63643,7 @@ pub unsafe fn vst1q_lane_p8(a: *mut p8, b: poly8x16_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63668,7 +63668,7 @@ pub unsafe fn vst1_lane_p16(a: *mut p16, b: poly16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63693,7 +63693,7 @@ pub unsafe fn vst1q_lane_p16(a: *mut p16, b: poly16x8_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,aes")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63718,7 +63718,7 @@ pub unsafe fn vst1_lane_p64(a: *mut p64, b: poly64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63743,7 +63743,7 @@ pub unsafe fn vst1_lane_s64(a: *mut i64, b: int64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_lane_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -63768,7 +63768,7 @@ pub unsafe fn vst1_lane_u64(a: *mut u64, b: uint64x1_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -63791,7 +63791,7 @@ pub unsafe fn vst1_p64_x2(a: *mut p64, b: poly64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -63814,7 +63814,7 @@ pub unsafe fn vst1_p64_x3(a: *mut p64, b: poly64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -63837,7 +63837,7 @@ pub unsafe fn vst1_p64_x4(a: *mut p64, b: poly64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -63860,7 +63860,7 @@ pub unsafe fn vst1q_p64_x2(a: *mut p64, b: poly64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -63883,7 +63883,7 @@ pub unsafe fn vst1q_p64_x3(a: *mut p64, b: poly64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,aes")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -63906,7 +63906,7 
@@ pub unsafe fn vst1q_p64_x4(a: *mut p64, b: poly64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -63925,7 +63925,7 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -63944,7 +63944,7 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -63963,7 +63963,7 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -63982,7 +63982,7 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since 
= "1.59.0")] @@ -64001,7 +64001,7 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64020,7 +64020,7 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64039,7 +64039,7 @@ pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64058,7 +64058,7 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64074,7 +64074,7 @@ pub unsafe fn vst1_s8_x2(a: *mut i8, b: int8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = 
"arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64090,7 +64090,7 @@ pub unsafe fn vst1q_s8_x2(a: *mut i8, b: int8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64106,7 +64106,7 @@ pub unsafe fn vst1_s16_x2(a: *mut i16, b: int16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64122,7 +64122,7 @@ pub unsafe fn vst1q_s16_x2(a: *mut i16, b: int16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64138,7 +64138,7 @@ pub unsafe fn vst1_s32_x2(a: *mut i32, b: int32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64154,7 +64154,7 @@ pub unsafe fn vst1q_s32_x2(a: *mut i32, b: int32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon 
intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64170,7 +64170,7 @@ pub unsafe fn vst1_s64_x2(a: *mut i64, b: int64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64186,7 +64186,7 @@ pub unsafe fn vst1q_s64_x2(a: *mut i64, b: int64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64205,7 +64205,7 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64224,7 +64224,7 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64243,7 +64243,7 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64262,7 +64262,7 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64281,7 +64281,7 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64300,7 +64300,7 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64319,7 +64319,7 @@ pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64338,7 +64338,7 @@ pub unsafe fn vst1q_s64_x3(a: 
*mut i64, b: int64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64354,7 +64354,7 @@ pub unsafe fn vst1_s8_x3(a: *mut i8, b: int8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64370,7 +64370,7 @@ pub unsafe fn vst1q_s8_x3(a: *mut i8, b: int8x16x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64386,7 +64386,7 @@ pub unsafe fn vst1_s16_x3(a: *mut i16, b: int16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64402,7 +64402,7 @@ pub unsafe fn vst1q_s16_x3(a: *mut i16, b: int16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64418,7 +64418,7 @@ pub unsafe fn vst1_s32_x3(a: *mut i32, b: int32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64434,7 +64434,7 @@ pub unsafe fn vst1q_s32_x3(a: *mut i32, b: int32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64450,7 +64450,7 @@ pub unsafe fn vst1_s64_x3(a: *mut i64, b: int64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64466,7 +64466,7 @@ pub unsafe fn vst1q_s64_x3(a: *mut i64, b: int64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64485,7 +64485,7 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64504,7 +64504,7 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64523,7 +64523,7 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64542,7 +64542,7 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64561,7 +64561,7 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64580,7 +64580,7 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic 
unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64599,7 +64599,7 @@ pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -64618,7 +64618,7 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64634,7 +64634,7 @@ pub unsafe fn vst1_s8_x4(a: *mut i8, b: int8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64650,7 +64650,7 @@ pub unsafe fn vst1q_s8_x4(a: *mut i8, b: int8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64666,7 +64666,7 @@ pub unsafe fn vst1_s16_x4(a: *mut i16, b: int16x4x4_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64682,7 +64682,7 @@ pub unsafe fn vst1q_s16_x4(a: *mut i16, b: int16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64698,7 +64698,7 @@ pub unsafe fn vst1_s32_x4(a: *mut i32, b: int32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64714,7 +64714,7 @@ pub unsafe fn vst1q_s32_x4(a: *mut i32, b: int32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -64730,7 +64730,7 @@ pub unsafe fn vst1_s64_x4(a: *mut i64, b: int64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_s64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = 
"111800")] @@ -64746,7 +64746,7 @@ pub unsafe fn vst1q_s64_x4(a: *mut i64, b: int64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64769,7 +64769,7 @@ pub unsafe fn vst1_u8_x2(a: *mut u8, b: uint8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64792,7 +64792,7 @@ pub unsafe fn vst1_u8_x3(a: *mut u8, b: uint8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64815,7 +64815,7 @@ pub unsafe fn vst1_u8_x4(a: *mut u8, b: uint8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64838,7 +64838,7 @@ pub unsafe fn vst1q_u8_x2(a: *mut u8, b: uint8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x3)"] #[doc = "## Safety"] 
#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64861,7 +64861,7 @@ pub unsafe fn vst1q_u8_x3(a: *mut u8, b: uint8x16x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64884,7 +64884,7 @@ pub unsafe fn vst1q_u8_x4(a: *mut u8, b: uint8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64907,7 +64907,7 @@ pub unsafe fn vst1_u16_x2(a: *mut u16, b: uint16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64930,7 +64930,7 @@ pub unsafe fn vst1_u16_x3(a: *mut u16, b: uint16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vst1))] @@ -64953,7 +64953,7 @@ pub unsafe fn vst1_u16_x4(a: *mut u16, b: uint16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64976,7 +64976,7 @@ pub unsafe fn vst1q_u16_x2(a: *mut u16, b: uint16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -64999,7 +64999,7 @@ pub unsafe fn vst1q_u16_x3(a: *mut u16, b: uint16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65022,7 +65022,7 @@ pub unsafe fn vst1q_u16_x4(a: *mut u16, b: uint16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65045,7 +65045,7 @@ pub unsafe fn vst1_u32_x2(a: *mut u32, b: uint32x2x2_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65068,7 +65068,7 @@ pub unsafe fn vst1_u32_x3(a: *mut u32, b: uint32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65091,7 +65091,7 @@ pub unsafe fn vst1_u32_x4(a: *mut u32, b: uint32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65114,7 +65114,7 @@ pub unsafe fn vst1q_u32_x2(a: *mut u32, b: uint32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65137,7 +65137,7 @@ pub unsafe fn vst1q_u32_x3(a: *mut u32, b: uint32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u32_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65160,7 +65160,7 @@ pub unsafe fn vst1q_u32_x4(a: *mut u32, b: uint32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65183,7 +65183,7 @@ pub unsafe fn vst1_u64_x2(a: *mut u64, b: uint64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65206,7 +65206,7 @@ pub unsafe fn vst1_u64_x3(a: *mut u64, b: uint64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65229,7 +65229,7 @@ pub unsafe fn vst1_u64_x4(a: *mut u64, b: uint64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65252,7 +65252,7 @@ pub unsafe fn vst1q_u64_x2(a: *mut u64, b: 
uint64x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65275,7 +65275,7 @@ pub unsafe fn vst1q_u64_x3(a: *mut u64, b: uint64x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_u64_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65298,7 +65298,7 @@ pub unsafe fn vst1q_u64_x4(a: *mut u64, b: uint64x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65321,7 +65321,7 @@ pub unsafe fn vst1_p8_x2(a: *mut p8, b: poly8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65344,7 +65344,7 @@ pub unsafe fn vst1_p8_x3(a: *mut p8, b: poly8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65367,7 +65367,7 @@ pub unsafe fn vst1_p8_x4(a: *mut p8, b: poly8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65390,7 +65390,7 @@ pub unsafe fn vst1q_p8_x2(a: *mut p8, b: poly8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65413,7 +65413,7 @@ pub unsafe fn vst1q_p8_x3(a: *mut p8, b: poly8x16x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p8_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65436,7 +65436,7 @@ pub unsafe fn vst1q_p8_x4(a: *mut p8, b: poly8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65459,7 +65459,7 @@ pub unsafe fn vst1_p16_x2(a: *mut 
p16, b: poly16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65482,7 +65482,7 @@ pub unsafe fn vst1_p16_x3(a: *mut p16, b: poly16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65505,7 +65505,7 @@ pub unsafe fn vst1_p16_x4(a: *mut p16, b: poly16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x2)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65528,7 +65528,7 @@ pub unsafe fn vst1q_p16_x2(a: *mut p16, b: poly16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x3)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65551,7 +65551,7 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_p16_x4)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst1))] @@ -65570,7 +65570,7 @@ pub unsafe fn vst1q_p16_x3(a: *mut p16, b: poly16x8x3_t) { pub unsafe fn vst1q_p16_x4(a: *mut p16, b: poly16x8x4_t) { vst1q_s16_x4(transmute(a), transmute(b)) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65584,7 +65584,7 @@ unsafe fn vst1_v1i64(addr: *const i8, val: int64x1_t) { } _vst1_v1i64(addr, val, ALIGN) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65598,7 +65598,7 @@ unsafe fn vst1_v2f32(addr: *const i8, val: float32x2_t) { } _vst1_v2f32(addr, val, ALIGN) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65612,7 +65612,7 @@ unsafe fn vst1_v2i32(addr: *const i8, val: int32x2_t) { } _vst1_v2i32(addr, val, ALIGN) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65626,7 +65626,7 @@ unsafe fn vst1_v4i16(addr: *const i8, val: int16x4_t) { } _vst1_v4i16(addr, val, ALIGN) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65640,7 +65640,7 @@ unsafe fn vst1_v8i8(addr: *const i8, val: int8x8_t) { } _vst1_v8i8(addr, val, ALIGN) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65654,7 +65654,7 @@ unsafe fn vst1q_v16i8(addr: *const i8, val: int8x16_t) { } _vst1q_v16i8(addr, val, 
ALIGN) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65668,7 +65668,7 @@ unsafe fn vst1q_v2i64(addr: *const i8, val: int64x2_t) { } _vst1q_v2i64(addr, val, ALIGN) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65682,7 +65682,7 @@ unsafe fn vst1q_v4f32(addr: *const i8, val: float32x4_t) { } _vst1q_v4f32(addr, val, ALIGN) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65696,7 +65696,7 @@ unsafe fn vst1q_v4i32(addr: *const i8, val: int32x4_t) { } _vst1q_v4i32(addr, val, ALIGN) } -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65714,7 +65714,7 @@ unsafe fn vst1q_v8i16(addr: *const i8, val: int16x8_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1_v4f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] @@ -65732,7 +65732,7 @@ unsafe fn vst1_v4f16(addr: *const i8, val: float16x4_t, align: i32) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_v8f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[target_feature(enable = "neon,fp16")] @@ -65750,7 +65750,7 @@ unsafe fn vst1q_v8f16(addr: *const i8, val: float16x8_t, align: i32) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst1q_lane_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,aes")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop, LANE = 0))] @@ -65775,7 +65775,7 @@ pub unsafe fn vst1q_lane_p64(a: *mut p64, b: poly64x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] @@ -65796,7 +65796,7 @@ pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] @@ -65817,7 +65817,7 @@ pub unsafe fn vst2q_f16(a: *mut f16, b: float16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -65836,7 +65836,7 @@ pub unsafe fn vst2_f16(a: *mut f16, b: float16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"v7"))] @@ -65855,7 +65855,7 @@ pub unsafe fn vst2q_f16(a: *mut f16, b: float16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -65867,7 +65867,7 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -65879,7 +65879,7 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -65891,7 +65891,7 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -65903,7 +65903,7 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", 
since = "1.59.0")] @@ -65915,7 +65915,7 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -65927,7 +65927,7 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -65939,7 +65939,7 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -65951,7 +65951,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -65967,7 +65967,7 @@ pub unsafe fn vst2_f32(a: *mut f32, b: float32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] 
#[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -65983,7 +65983,7 @@ pub unsafe fn vst2q_f32(a: *mut f32, b: float32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -65999,7 +65999,7 @@ pub unsafe fn vst2_s8(a: *mut i8, b: int8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -66015,7 +66015,7 @@ pub unsafe fn vst2q_s8(a: *mut i8, b: int8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -66031,7 +66031,7 @@ pub unsafe fn vst2_s16(a: *mut i16, b: int16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -66047,7 +66047,7 @@ pub unsafe fn vst2q_s16(a: *mut i16, b: int16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -66063,7 +66063,7 @@ pub unsafe fn vst2_s32(a: *mut i32, b: int32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -66079,7 +66079,7 @@ pub unsafe fn vst2q_s32(a: *mut i32, b: int32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -66102,7 +66102,7 @@ pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -66125,7 +66125,7 @@ pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -66146,7 +66146,7 @@ pub unsafe fn vst2_lane_f16(a: *mut f16, b: float16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic 
unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -66167,7 +66167,7 @@ pub unsafe fn vst2q_lane_f16(a: *mut f16, b: float16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -66188,7 +66188,7 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -66209,7 +66209,7 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -66230,7 +66230,7 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -66251,7 +66251,7 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] 
+#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -66272,7 +66272,7 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -66293,7 +66293,7 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -66314,7 +66314,7 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] @@ -66332,7 +66332,7 @@ pub unsafe fn vst2_lane_f32(a: *mut f32, b: float32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] @@ -66350,7 +66350,7 @@ pub unsafe fn vst2q_lane_f32(a: *mut f32, b: float32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] @@ -66368,7 +66368,7 @@ pub unsafe fn vst2_lane_s8(a: *mut i8, b: int8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] @@ -66386,7 +66386,7 @@ pub unsafe fn vst2_lane_s16(a: *mut i16, b: int16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] @@ -66404,7 +66404,7 @@ pub unsafe fn vst2q_lane_s16(a: *mut i16, b: int16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] @@ -66422,7 +66422,7 @@ pub unsafe fn vst2_lane_s32(a: *mut i32, b: int32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst2, LANE = 0))] @@ -66440,7 +66440,7 @@ pub unsafe fn vst2q_lane_s32(a: *mut i32, b: int32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] @@ -66465,7 +66465,7 @@ pub unsafe fn vst2_lane_u8(a: *mut u8, b: uint8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] @@ -66490,7 +66490,7 @@ pub unsafe fn vst2_lane_u16(a: *mut u16, b: uint16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] @@ -66515,7 +66515,7 @@ pub unsafe fn vst2q_lane_u16(a: *mut u16, b: uint16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] @@ -66540,7 +66540,7 @@ pub unsafe fn vst2_lane_u32(a: *mut u32, b: uint32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vst2, LANE = 0))] @@ -66565,7 +66565,7 @@ pub unsafe fn vst2q_lane_u32(a: *mut u32, b: uint32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] @@ -66590,7 +66590,7 @@ pub unsafe fn vst2_lane_p8(a: *mut p8, b: poly8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] @@ -66615,7 +66615,7 @@ pub unsafe fn vst2_lane_p16(a: *mut p16, b: poly16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2, LANE = 0))] @@ -66640,7 +66640,7 @@ pub unsafe fn vst2q_lane_p16(a: *mut p16, b: poly16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,aes")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -66663,7 +66663,7 @@ pub unsafe fn vst2_p64(a: *mut p64, b: poly64x1x2_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -66675,7 +66675,7 @@ pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -66687,7 +66687,7 @@ pub unsafe fn vst2_s64(a: *mut i64, b: int64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -66710,7 +66710,7 @@ pub unsafe fn vst2_u64(a: *mut u64, b: uint64x1x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] @@ -66733,7 +66733,7 @@ pub unsafe fn vst2_u8(a: *mut u8, b: uint8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vst2))] @@ -66756,7 +66756,7 @@ pub unsafe fn vst2q_u8(a: *mut u8, b: uint8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] @@ -66779,7 +66779,7 @@ pub unsafe fn vst2_u16(a: *mut u16, b: uint16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] @@ -66802,7 +66802,7 @@ pub unsafe fn vst2q_u16(a: *mut u16, b: uint16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] @@ -66825,7 +66825,7 @@ pub unsafe fn vst2_u32(a: *mut u32, b: uint32x2x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] @@ -66848,7 +66848,7 @@ pub unsafe fn vst2q_u32(a: *mut u32, b: uint32x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p8)"] 
#[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] @@ -66871,7 +66871,7 @@ pub unsafe fn vst2_p8(a: *mut p8, b: poly8x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] @@ -66894,7 +66894,7 @@ pub unsafe fn vst2q_p8(a: *mut p8, b: poly8x16x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] @@ -66917,7 +66917,7 @@ pub unsafe fn vst2_p16(a: *mut p16, b: poly16x4x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst2q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst2))] @@ -66940,7 +66940,7 @@ pub unsafe fn vst2q_p16(a: *mut p16, b: poly16x8x2_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -66959,7 +66959,7 @@ pub 
unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -66978,7 +66978,7 @@ pub unsafe fn vst3q_f16(a: *mut f16, b: float16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] @@ -66999,7 +66999,7 @@ pub unsafe fn vst3_f16(a: *mut f16, b: float16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] @@ -67020,7 +67020,7 @@ pub unsafe fn vst3q_f16(a: *mut f16, b: float16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -67032,7 +67032,7 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = 
"stdarch_arm_neon_intrinsics", issue = "111800")] @@ -67044,7 +67044,7 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -67056,7 +67056,7 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -67068,7 +67068,7 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -67080,7 +67080,7 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -67092,7 +67092,7 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = 
"arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -67104,7 +67104,7 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -67116,7 +67116,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -67128,7 +67128,7 @@ pub unsafe fn vst3_f32(a: *mut f32, b: float32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -67140,7 +67140,7 @@ pub unsafe fn vst3q_f32(a: *mut f32, b: float32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -67152,7 +67152,7 @@ pub unsafe fn vst3_s8(a: *mut i8, b: int8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -67164,7 +67164,7 @@ pub unsafe fn vst3q_s8(a: *mut i8, b: int8x16x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -67176,7 +67176,7 @@ pub unsafe fn vst3_s16(a: *mut i16, b: int16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -67188,7 +67188,7 @@ pub unsafe fn vst3q_s16(a: *mut i16, b: int16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -67200,7 +67200,7 @@ pub unsafe fn vst3_s32(a: *mut i32, b: int32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -67212,7 +67212,7 @@ pub unsafe fn vst3q_s32(a: *mut i32, b: int32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon 
intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -67240,7 +67240,7 @@ pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -67268,7 +67268,7 @@ pub unsafe fn vst3q_lane_f16(a: *mut f16, b: float16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -67291,7 +67291,7 @@ pub unsafe fn vst3_lane_f16(a: *mut f16, b: float16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -67314,7 +67314,7 @@ pub unsafe fn vst3q_lane_f16(a: *mut f16, b: float16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] @@ -67339,7 +67339,7 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] 
#[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] @@ -67364,7 +67364,7 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] @@ -67382,7 +67382,7 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] @@ -67407,7 +67407,7 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] @@ -67432,7 +67432,7 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] @@ -67457,7 +67457,7 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## 
Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst3, LANE = 0))] @@ -67482,7 +67482,7 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -67503,7 +67503,7 @@ pub unsafe fn vst3_lane_f32(a: *mut f32, b: float32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -67524,7 +67524,7 @@ pub unsafe fn vst3q_lane_f32(a: *mut f32, b: float32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -67545,7 +67545,7 @@ pub unsafe fn vst3_lane_s8(a: *mut i8, b: int8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -67566,7 +67566,7 @@ pub unsafe fn vst3_lane_s16(a: *mut i16, b: int16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic 
unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -67587,7 +67587,7 @@ pub unsafe fn vst3q_lane_s16(a: *mut i16, b: int16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -67608,7 +67608,7 @@ pub unsafe fn vst3_lane_s32(a: *mut i32, b: int32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -67629,7 +67629,7 @@ pub unsafe fn vst3q_lane_s32(a: *mut i32, b: int32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] @@ -67654,7 +67654,7 @@ pub unsafe fn vst3_lane_u8(a: *mut u8, b: uint8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] @@ -67679,7 +67679,7 @@ pub unsafe fn vst3_lane_u16(a: *mut u16, b: uint16x4x3_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] @@ -67704,7 +67704,7 @@ pub unsafe fn vst3q_lane_u16(a: *mut u16, b: uint16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] @@ -67729,7 +67729,7 @@ pub unsafe fn vst3_lane_u32(a: *mut u32, b: uint32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] @@ -67754,7 +67754,7 @@ pub unsafe fn vst3q_lane_u32(a: *mut u32, b: uint32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] @@ -67779,7 +67779,7 @@ pub unsafe fn vst3_lane_p8(a: *mut p8, b: poly8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] @@ -67804,7 +67804,7 @@ pub unsafe fn vst3_lane_p16(a: *mut p16, b: poly16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3, LANE = 0))] @@ -67829,7 +67829,7 @@ pub unsafe fn vst3q_lane_p16(a: *mut p16, b: poly16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,aes")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -67852,7 +67852,7 @@ pub unsafe fn vst3_p64(a: *mut p64, b: poly64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -67864,7 +67864,7 @@ pub unsafe fn vst3_s64(a: *mut i64, b: int64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -67876,7 +67876,7 @@ pub unsafe fn vst3_s64(a: *mut i64, b: 
int64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -67899,7 +67899,7 @@ pub unsafe fn vst3_u64(a: *mut u64, b: uint64x1x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -67922,7 +67922,7 @@ pub unsafe fn vst3_u8(a: *mut u8, b: uint8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -67945,7 +67945,7 @@ pub unsafe fn vst3q_u8(a: *mut u8, b: uint8x16x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -67968,7 +67968,7 @@ pub unsafe fn vst3_u16(a: *mut u16, b: uint16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = 
"neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -67991,7 +67991,7 @@ pub unsafe fn vst3q_u16(a: *mut u16, b: uint16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -68014,7 +68014,7 @@ pub unsafe fn vst3_u32(a: *mut u32, b: uint32x2x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -68037,7 +68037,7 @@ pub unsafe fn vst3q_u32(a: *mut u32, b: uint32x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -68060,7 +68060,7 @@ pub unsafe fn vst3_p8(a: *mut p8, b: poly8x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -68083,7 +68083,7 @@ pub unsafe fn vst3q_p8(a: *mut p8, b: poly8x16x3_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -68106,7 +68106,7 @@ pub unsafe fn vst3_p16(a: *mut p16, b: poly16x4x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst3q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst3))] @@ -68129,7 +68129,7 @@ pub unsafe fn vst3q_p16(a: *mut p16, b: poly16x8x3_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -68155,7 +68155,7 @@ pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -68181,7 +68181,7 @@ pub unsafe fn vst4q_f16(a: *mut f16, b: float16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(target_arch = "arm", target_feature(enable = 
"fp16"))] @@ -68202,7 +68202,7 @@ pub unsafe fn vst4_f16(a: *mut f16, b: float16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[cfg_attr(target_arch = "arm", target_feature(enable = "fp16"))] @@ -68223,7 +68223,7 @@ pub unsafe fn vst4q_f16(a: *mut f16, b: float16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -68246,7 +68246,7 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -68269,7 +68269,7 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -68285,7 +68285,7 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = 
"neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -68308,7 +68308,7 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -68331,7 +68331,7 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -68354,7 +68354,7 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -68377,7 +68377,7 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -68400,7 +68400,7 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -68412,7 +68412,7 @@ pub unsafe fn vst4_f32(a: *mut f32, b: float32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -68424,7 +68424,7 @@ pub unsafe fn vst4q_f32(a: *mut f32, b: float32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -68436,7 +68436,7 @@ pub unsafe fn vst4_s8(a: *mut i8, b: int8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -68448,7 +68448,7 @@ pub unsafe fn vst4q_s8(a: *mut i8, b: int8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -68460,7 +68460,7 @@ pub unsafe fn vst4_s16(a: *mut i16, b: int16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic 
unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -68472,7 +68472,7 @@ pub unsafe fn vst4q_s16(a: *mut i16, b: int16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -68484,7 +68484,7 @@ pub unsafe fn vst4_s32(a: *mut i32, b: int32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -68496,7 +68496,7 @@ pub unsafe fn vst4q_s32(a: *mut i32, b: int32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -68525,7 +68525,7 @@ pub unsafe fn vst4_lane_f16(a: *mut f16, b: float16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -68554,7 +68554,7 @@ pub unsafe fn vst4q_lane_f16(a: *mut f16, b: float16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f16)"] 
#[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -68584,7 +68584,7 @@ pub unsafe fn vst4_lane_f16(a: *mut f16, b: float16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -68614,7 +68614,7 @@ pub unsafe fn vst4q_lane_f16(a: *mut f16, b: float16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] @@ -68640,7 +68640,7 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] @@ -68666,7 +68666,7 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] @@ -68692,7 +68692,7 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## 
Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] @@ -68718,7 +68718,7 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] @@ -68744,7 +68744,7 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] @@ -68770,7 +68770,7 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[cfg_attr(test, assert_instr(vst4, LANE = 0))] @@ -68796,7 +68796,7 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_f32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -68824,7 +68824,7 @@ pub unsafe fn vst4_lane_f32(a: *mut f32, b: float32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_f32)"] #[doc = "## 
Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -68852,7 +68852,7 @@ pub unsafe fn vst4q_lane_f32(a: *mut f32, b: float32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -68873,7 +68873,7 @@ pub unsafe fn vst4_lane_s8(a: *mut i8, b: int8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -68901,7 +68901,7 @@ pub unsafe fn vst4_lane_s16(a: *mut i16, b: int16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -68929,7 +68929,7 @@ pub unsafe fn vst4q_lane_s16(a: *mut i16, b: int16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -68957,7 +68957,7 @@ pub unsafe fn vst4_lane_s32(a: *mut i32, b: int32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_s32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[rustc_legacy_const_generics(2)] @@ -68985,7 +68985,7 @@ pub unsafe fn vst4q_lane_s32(a: *mut i32, b: int32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] @@ -69010,7 +69010,7 @@ pub unsafe fn vst4_lane_u8(a: *mut u8, b: uint8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] @@ -69035,7 +69035,7 @@ pub unsafe fn vst4_lane_u16(a: *mut u16, b: uint16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] @@ -69060,7 +69060,7 @@ pub unsafe fn vst4q_lane_u16(a: *mut u16, b: uint16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] @@ -69085,7 +69085,7 @@ pub 
unsafe fn vst4_lane_u32(a: *mut u32, b: uint32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] @@ -69110,7 +69110,7 @@ pub unsafe fn vst4q_lane_u32(a: *mut u32, b: uint32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] @@ -69135,7 +69135,7 @@ pub unsafe fn vst4_lane_p8(a: *mut p8, b: poly8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] @@ -69160,7 +69160,7 @@ pub unsafe fn vst4_lane_p16(a: *mut p16, b: poly16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_lane_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4, LANE = 0))] @@ -69185,7 +69185,7 @@ pub unsafe fn vst4q_lane_p16(a: *mut p16, b: poly16x8x4_t) { #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,aes")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -69208,7 +69208,7 @@ pub unsafe fn vst4_p64(a: *mut p64, b: poly64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[cfg(target_arch = "arm")] #[target_feature(enable = "neon,v7")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -69220,7 +69220,7 @@ pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_s64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(not(target_arch = "arm"))] #[stable(feature = "neon_intrinsics", since = "1.59.0")] @@ -69232,7 +69232,7 @@ pub unsafe fn vst4_s64(a: *mut i64, b: int64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u64)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -69255,7 +69255,7 @@ pub unsafe fn vst4_u64(a: *mut u64, b: uint64x1x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, 
target_arch = "arm"), assert_instr(vst4))] @@ -69278,7 +69278,7 @@ pub unsafe fn vst4_u8(a: *mut u8, b: uint8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] @@ -69301,7 +69301,7 @@ pub unsafe fn vst4q_u8(a: *mut u8, b: uint8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] @@ -69324,7 +69324,7 @@ pub unsafe fn vst4_u16(a: *mut u16, b: uint16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] @@ -69347,7 +69347,7 @@ pub unsafe fn vst4q_u16(a: *mut u16, b: uint16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_u32)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] @@ -69370,7 +69370,7 @@ pub unsafe fn vst4_u32(a: *mut u32, b: uint32x2x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_u32)"] #[doc = 
"## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] @@ -69393,7 +69393,7 @@ pub unsafe fn vst4q_u32(a: *mut u32, b: uint32x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] @@ -69416,7 +69416,7 @@ pub unsafe fn vst4_p8(a: *mut p8, b: poly8x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p8)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] @@ -69439,7 +69439,7 @@ pub unsafe fn vst4q_p8(a: *mut p8, b: poly8x16x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] @@ -69462,7 +69462,7 @@ pub unsafe fn vst4_p16(a: *mut p16, b: poly16x4x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vst4q_p16)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vst4))] 
@@ -69485,7 +69485,7 @@ pub unsafe fn vst4q_p16(a: *mut p16, b: poly16x8x4_t) { #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vstrq_p128)"] #[doc = "## Safety"] #[doc = " * Neon intrinsic unsafe"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -69506,7 +69506,7 @@ pub unsafe fn vstrq_p128(a: *mut p128, b: p128) { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f16"))] #[cfg_attr( @@ -69528,7 +69528,7 @@ pub fn vsub_f16(a: float16x4_t, b: float16x4_t) -> float16x4_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f16"))] #[cfg_attr( @@ -69550,7 +69550,7 @@ pub fn vsubq_f16(a: float16x8_t, b: float16x8_t) -> float16x8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] @@ -69571,7 +69571,7 @@ pub fn vsub_f32(a: float32x2_t, b: float32x2_t) -> float32x2_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.f32"))] @@ -69592,7 +69592,7 @@ pub fn vsubq_f32(a: float32x4_t, b: float32x4_t) -> float32x4_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] @@ -69613,7 +69613,7 @@ pub fn vsub_s16(a: int16x4_t, b: int16x4_t) -> int16x4_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] @@ -69634,7 +69634,7 @@ pub fn vsubq_s16(a: int16x8_t, b: int16x8_t) -> int16x8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] @@ -69655,7 +69655,7 @@ pub fn vsub_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i16"))] @@ -69676,7 +69676,7 @@ pub fn vsubq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s32)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] @@ -69697,7 +69697,7 @@ pub fn vsub_s32(a: int32x2_t, b: int32x2_t) -> int32x2_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] @@ -69718,7 +69718,7 @@ pub fn vsubq_s32(a: int32x4_t, b: int32x4_t) -> int32x4_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] @@ -69739,7 +69739,7 @@ pub fn vsub_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i32"))] @@ -69760,7 +69760,7 @@ pub fn vsubq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] @@ -69781,7 +69781,7 @@ pub fn vsub_s64(a: int64x1_t, b: int64x1_t) -> int64x1_t { } #[doc = "Subtract"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] @@ -69802,7 +69802,7 @@ pub fn vsubq_s64(a: int64x2_t, b: int64x2_t) -> int64x2_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] @@ -69823,7 +69823,7 @@ pub fn vsub_u64(a: uint64x1_t, b: uint64x1_t) -> uint64x1_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i64"))] @@ -69844,7 +69844,7 @@ pub fn vsubq_u64(a: uint64x2_t, b: uint64x2_t) -> uint64x2_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] @@ -69865,7 +69865,7 @@ pub fn vsub_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] @@ -69886,7 +69886,7 @@ pub fn vsubq_s8(a: 
int8x16_t, b: int8x16_t) -> int8x16_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsub_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] @@ -69907,7 +69907,7 @@ pub fn vsub_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Subtract"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vsub.i8"))] @@ -69928,7 +69928,7 @@ pub fn vsubq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -69950,7 +69950,7 @@ pub fn vsubhn_high_s16(a: int8x8_t, b: int16x8_t, c: int16x8_t) -> int8x16_t { } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -69972,7 +69972,7 @@ pub fn vsubhn_high_s32(a: int16x4_t, b: int32x4_t, c: int32x4_t) -> int16x8_t { } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_s64)"] -#[inline(always)] +#[inline] 
#[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -69994,7 +69994,7 @@ pub fn vsubhn_high_s64(a: int32x2_t, b: int64x2_t, c: int64x2_t) -> int32x4_t { } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -70016,7 +70016,7 @@ pub fn vsubhn_high_u16(a: uint8x8_t, b: uint16x8_t, c: uint16x8_t) -> uint8x16_t } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -70038,7 +70038,7 @@ pub fn vsubhn_high_u32(a: uint16x4_t, b: uint32x4_t, c: uint32x4_t) -> uint16x8_ } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_high_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -70060,7 +70060,7 @@ pub fn vsubhn_high_u64(a: uint32x2_t, b: uint64x2_t, c: uint64x2_t) -> uint32x4_ } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vsubhn))] @@ -70082,7 +70082,7 @@ pub fn vsubhn_s16(a: int16x8_t, b: int16x8_t) -> int8x8_t { } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -70104,7 +70104,7 @@ pub fn vsubhn_s32(a: int32x4_t, b: int32x4_t) -> int16x4_t { } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_s64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -70126,7 +70126,7 @@ pub fn vsubhn_s64(a: int64x2_t, b: int64x2_t) -> int32x2_t { } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -70148,7 +70148,7 @@ pub fn vsubhn_u16(a: uint16x8_t, b: uint16x8_t) -> uint8x8_t { } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -70170,7 +70170,7 @@ pub fn vsubhn_u32(a: uint32x4_t, b: uint32x4_t) -> uint16x4_t { } #[doc = "Subtract returning high narrow"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubhn_u64)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubhn))] @@ -70192,7 +70192,7 @@ pub fn vsubhn_u64(a: uint64x2_t, b: uint64x2_t) -> uint32x2_t { } #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] @@ -70217,7 +70217,7 @@ pub fn vsubl_s8(a: int8x8_t, b: int8x8_t) -> int16x8_t { } #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] @@ -70242,7 +70242,7 @@ pub fn vsubl_s16(a: int16x4_t, b: int16x4_t) -> int32x4_t { } #[doc = "Signed Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] @@ -70267,7 +70267,7 @@ pub fn vsubl_s32(a: int32x2_t, b: int32x2_t) -> int64x2_t { } #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] @@ 
-70292,7 +70292,7 @@ pub fn vsubl_u8(a: uint8x8_t, b: uint8x8_t) -> uint16x8_t { } #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] @@ -70317,7 +70317,7 @@ pub fn vsubl_u16(a: uint16x4_t, b: uint16x4_t) -> uint32x4_t { } #[doc = "Unsigned Subtract Long"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubl_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubl))] @@ -70342,7 +70342,7 @@ pub fn vsubl_u32(a: uint32x2_t, b: uint32x2_t) -> uint64x2_t { } #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] @@ -70363,7 +70363,7 @@ pub fn vsubw_s8(a: int16x8_t, b: int8x8_t) -> int16x8_t { } #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] @@ -70384,7 +70384,7 @@ pub fn vsubw_s16(a: int32x4_t, b: int16x4_t) -> int32x4_t { } #[doc = "Signed Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] @@ -70405,7 +70405,7 @@ pub fn vsubw_s32(a: int64x2_t, b: int32x2_t) -> int64x2_t { } #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] @@ -70426,7 +70426,7 @@ pub fn vsubw_u8(a: uint16x8_t, b: uint8x8_t) -> uint16x8_t { } #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] @@ -70447,7 +70447,7 @@ pub fn vsubw_u16(a: uint32x4_t, b: uint16x4_t) -> uint32x4_t { } #[doc = "Unsigned Subtract Wide"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsubw_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsubw))] @@ -70468,7 +70468,7 @@ pub fn vsubw_u32(a: uint64x2_t, b: uint32x2_t) -> uint64x2_t { } #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -70496,7 +70496,7 @@ pub fn vsudot_lane_s32(a: int32x2_t, b: int8x8_t, c: uint8x8_t) } #[doc = "Dot product index form with signed and unsigned 
integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -70528,7 +70528,7 @@ pub fn vsudot_lane_s32(a: int32x2_t, b: int8x8_t, c: uint8x8_t) } #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -70557,7 +70557,7 @@ pub fn vsudotq_lane_s32(a: int32x4_t, b: int8x16_t, c: uint8x8_ } #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -70591,7 +70591,7 @@ pub fn vsudotq_lane_s32(a: int32x4_t, b: int8x16_t, c: uint8x8_ } #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudot_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 1))] @@ -70611,7 +70611,7 @@ pub fn vsudot_laneq_s32(a: int32x2_t, b: int8x8_t, c: uint8x16_ } #[doc = "Dot product index form with signed and unsigned integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vsudotq_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vsudot, LANE = 1))] @@ -70632,7 +70632,7 @@ pub fn vsudotq_laneq_s32(a: int32x4_t, b: int8x16_t, c: uint8x1 } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -70647,7 +70647,7 @@ fn vtbl1(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -70658,7 +70658,7 @@ pub fn vtbl1_s8(a: int8x8_t, b: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70670,7 +70670,7 @@ pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70687,7 +70687,7 @@ pub fn vtbl1_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70699,7 +70699,7 @@ pub fn vtbl1_p8(a: poly8x8_t, b: 
uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl1_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70716,7 +70716,7 @@ pub fn vtbl1_p8(a: poly8x8_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -70731,7 +70731,7 @@ fn vtbl2(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -70742,7 +70742,7 @@ pub fn vtbl2_s8(a: int8x8x2_t, b: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70754,7 +70754,7 @@ pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70773,7 +70773,7 @@ pub fn vtbl2_u8(a: uint8x8x2_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] -#[inline(always)] +#[inline] 
#[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70785,7 +70785,7 @@ pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl2_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70804,7 +70804,7 @@ pub fn vtbl2_p8(a: poly8x8x2_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -70819,7 +70819,7 @@ fn vtbl3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -70830,7 +70830,7 @@ pub fn vtbl3_s8(a: int8x8x3_t, b: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70849,7 +70849,7 @@ pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70874,7 +70874,7 @@ pub fn vtbl3_u8(a: uint8x8x3_t, b: uint8x8_t) -> uint8x8_t { } #[doc = 
"Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70893,7 +70893,7 @@ pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl3_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70918,7 +70918,7 @@ pub fn vtbl3_p8(a: poly8x8x3_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -70933,7 +70933,7 @@ fn vtbl4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] @@ -70944,7 +70944,7 @@ pub fn vtbl4_s8(a: int8x8x4_t, b: int8x8_t) -> int8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70964,7 +70964,7 @@ pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = 
"big")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -70991,7 +70991,7 @@ pub fn vtbl4_u8(a: uint8x8x4_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -71011,7 +71011,7 @@ pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbl4_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon")] #[cfg(target_arch = "arm")] @@ -71038,7 +71038,7 @@ pub fn vtbl4_p8(a: poly8x8x4_t, b: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -71052,7 +71052,7 @@ fn vtbx1(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -71062,7 +71062,7 @@ pub fn vtbx1_s8(a: int8x8_t, b: int8x8_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71073,7 +71073,7 @@ pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, 
c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71090,7 +71090,7 @@ pub fn vtbx1_u8(a: uint8x8_t, b: uint8x8_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71101,7 +71101,7 @@ pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx1_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71118,7 +71118,7 @@ pub fn vtbx1_p8(a: poly8x8_t, b: poly8x8_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -71132,7 +71132,7 @@ fn vtbx2(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -71142,7 +71142,7 @@ pub fn vtbx2_s8(a: int8x8_t, b: int8x8x2_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] 
#[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71160,7 +71160,7 @@ pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71184,7 +71184,7 @@ pub fn vtbx2_u8(a: uint8x8_t, b: uint8x8x2_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71202,7 +71202,7 @@ pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx2_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71226,7 +71226,7 @@ pub fn vtbx2_p8(a: poly8x8_t, b: poly8x8x2_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -71240,7 +71240,7 @@ fn vtbx3(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t) -> int } #[doc = "Extended table look-up"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -71250,7 +71250,7 @@ pub fn vtbx3_s8(a: int8x8_t, b: int8x8x3_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71269,7 +71269,7 @@ pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71295,7 +71295,7 @@ pub fn vtbx3_u8(a: uint8x8_t, b: uint8x8x3_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71314,7 +71314,7 @@ pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx3_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71340,7 +71340,7 @@ pub fn vtbx3_p8(a: poly8x8_t, b: poly8x8x3_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] #[unstable(feature = "stdarch_arm_neon_intrinsics", issue = "111800")] @@ -71361,7 +71361,7 @@ fn vtbx4(a: int8x8_t, b: int8x8_t, c: int8x8_t, d: int8x8_t, e: int8x8_t, f: int } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71381,7 +71381,7 @@ pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_s8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71409,7 +71409,7 @@ pub fn vtbx4_s8(a: int8x8_t, b: int8x8x4_t, c: int8x8_t) -> int8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71429,7 +71429,7 @@ pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_u8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71457,7 +71457,7 @@ pub fn vtbx4_u8(a: uint8x8_t, b: uint8x8x4_t, c: uint8x8_t) -> uint8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = 
"neon,v7")] #[cfg(target_arch = "arm")] @@ -71477,7 +71477,7 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Extended table look-up"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtbx4_p8)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,v7")] #[cfg(target_arch = "arm")] @@ -71505,7 +71505,7 @@ pub fn vtbx4_p8(a: poly8x8_t, b: poly8x8x4_t, c: uint8x8_t) -> poly8x8_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( @@ -71535,7 +71535,7 @@ pub fn vtrn_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] #[cfg_attr( @@ -71565,7 +71565,7 @@ pub fn vtrnq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71594,7 +71594,7 @@ pub fn vtrn_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71623,7 +71623,7 @@ pub fn vtrn_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71652,7 +71652,7 @@ pub fn vtrn_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71681,7 +71681,7 @@ pub fn vtrnq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71710,7 +71710,7 @@ pub fn vtrn_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71747,7 +71747,7 @@ pub fn vtrnq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71776,7 +71776,7 @@ pub fn vtrn_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71805,7 +71805,7 @@ pub fn vtrnq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71834,7 +71834,7 @@ pub fn vtrnq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71863,7 +71863,7 @@ pub fn vtrn_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71900,7 +71900,7 @@ 
pub fn vtrnq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71929,7 +71929,7 @@ pub fn vtrn_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71958,7 +71958,7 @@ pub fn vtrnq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -71987,7 +71987,7 @@ pub fn vtrnq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -72016,7 +72016,7 @@ pub fn vtrn_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -72053,7 +72053,7 @@ pub fn vtrnq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrn_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -72082,7 +72082,7 @@ pub fn vtrn_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { } #[doc = "Transpose elements"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtrnq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -72111,7 +72111,7 @@ pub fn vtrnq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72136,7 +72136,7 @@ pub fn vtst_s8(a: int8x8_t, b: int8x8_t) -> uint8x8_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72161,7 +72161,7 @@ pub fn vtstq_s8(a: int8x16_t, b: int8x16_t) -> uint8x16_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72186,7 +72186,7 @@ pub fn vtst_s16(a: int16x4_t, b: int16x4_t) -> uint16x4_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72211,7 +72211,7 @@ pub fn vtstq_s16(a: int16x8_t, b: int16x8_t) -> uint16x8_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72236,7 +72236,7 @@ pub fn vtst_s32(a: int32x2_t, b: int32x2_t) -> uint32x2_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72261,7 +72261,7 @@ pub fn vtstq_s32(a: int32x4_t, b: int32x4_t) -> uint32x4_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] 
#[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72286,7 +72286,7 @@ pub fn vtst_p8(a: poly8x8_t, b: poly8x8_t) -> uint8x8_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72311,7 +72311,7 @@ pub fn vtstq_p8(a: poly8x16_t, b: poly8x16_t) -> uint8x16_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72336,7 +72336,7 @@ pub fn vtst_p16(a: poly16x4_t, b: poly16x4_t) -> uint16x4_t { } #[doc = "Signed compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72361,7 +72361,7 @@ pub fn vtstq_p16(a: poly16x8_t, b: poly16x8_t) -> uint16x8_t { } #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72386,7 +72386,7 @@ pub fn vtst_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8_t { } #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72411,7 +72411,7 @@ pub fn vtstq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16_t { } #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72436,7 +72436,7 @@ pub fn vtst_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4_t { } #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72461,7 +72461,7 @@ pub fn vtstq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8_t { } #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtst_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72486,7 +72486,7 @@ pub fn vtst_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2_t { } #[doc = "Unsigned compare bitwise Test bits nonzero"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vtstq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtst))] @@ -72511,7 +72511,7 @@ pub fn vtstq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4_t { } #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -72539,7 +72539,7 @@ pub fn vusdot_lane_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) } #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -72571,7 +72571,7 @@ pub fn vusdot_lane_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) } #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -72600,7 +72600,7 @@ pub fn vusdotq_lane_s32(a: int32x4_t, b: uint8x16_t, c: int8x8_ } #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_lane_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] @@ -72634,7 +72634,7 @@ pub fn vusdotq_lane_s32(a: int32x4_t, b: uint8x16_t, c: int8x8_ } #[doc = "Dot product index form with unsigned 
and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,i8mm")] @@ -72655,7 +72655,7 @@ pub fn vusdot_laneq_s32(a: int32x2_t, b: uint8x8_t, c: int8x16_ } #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,i8mm")] @@ -72681,7 +72681,7 @@ pub fn vusdot_laneq_s32(a: int32x2_t, b: uint8x8_t, c: int8x16_ } #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "little")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,i8mm")] @@ -72703,7 +72703,7 @@ pub fn vusdotq_laneq_s32(a: int32x4_t, b: uint8x16_t, c: int8x1 } #[doc = "Dot product index form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_laneq_s32)"] -#[inline(always)] +#[inline] #[cfg(target_endian = "big")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[target_feature(enable = "neon,i8mm")] @@ -72731,7 +72731,7 @@ pub fn vusdotq_laneq_s32(a: int32x4_t, b: uint8x16_t, c: int8x1 } #[doc = "Dot product vector form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdot_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,i8mm")] 
#[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] @@ -72760,7 +72760,7 @@ pub fn vusdot_s32(a: int32x2_t, b: uint8x8_t, c: int8x8_t) -> int32x2_t { } #[doc = "Dot product vector form with unsigned and signed integers"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusdotq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vusdot))] @@ -72789,7 +72789,7 @@ pub fn vusdotq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { } #[doc = "Unsigned and signed 8-bit integer matrix multiply-accumulate"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vusmmlaq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon,i8mm")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v8"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(nop))] @@ -72818,7 +72818,7 @@ pub fn vusmmlaq_s32(a: int32x4_t, b: uint8x16_t, c: int8x16_t) -> int32x4_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( @@ -72848,7 +72848,7 @@ pub fn vuzp_f16(a: float16x4_t, b: float16x4_t) -> float16x4x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] #[cfg_attr( @@ -72878,7 +72878,7 @@ pub fn vuzpq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t 
{ } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -72907,7 +72907,7 @@ pub fn vuzp_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -72936,7 +72936,7 @@ pub fn vuzp_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -72965,7 +72965,7 @@ pub fn vuzp_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -72994,7 +72994,7 @@ pub fn vuzpq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vuzp))] @@ -73023,7 +73023,7 @@ pub fn vuzp_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73060,7 +73060,7 @@ pub fn vuzpq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73089,7 +73089,7 @@ pub fn vuzp_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73118,7 +73118,7 @@ pub fn vuzpq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73147,7 +73147,7 @@ pub fn vuzpq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = 
"arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73176,7 +73176,7 @@ pub fn vuzp_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73213,7 +73213,7 @@ pub fn vuzpq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73242,7 +73242,7 @@ pub fn vuzp_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73271,7 +73271,7 @@ pub fn vuzpq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73300,7 +73300,7 @@ pub fn vuzpq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's 
documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73329,7 +73329,7 @@ pub fn vuzp_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73366,7 +73366,7 @@ pub fn vuzpq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzp_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73395,7 +73395,7 @@ pub fn vuzp_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { } #[doc = "Unzip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vuzpq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vuzp))] @@ -73424,7 +73424,7 @@ pub fn vuzpq_p16(a: poly16x8_t, b: poly16x8_t) -> poly16x8x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vzip.16"))] #[cfg_attr( @@ -73454,7 +73454,7 @@ pub fn vzip_f16(a: float16x4_t, b: 
float16x4_t) -> float16x4x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f16)"] -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr("vzip.16"))] #[cfg_attr( @@ -73484,7 +73484,7 @@ pub fn vzipq_f16(a: float16x8_t, b: float16x8_t) -> float16x8x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -73513,7 +73513,7 @@ pub fn vzip_f32(a: float32x2_t, b: float32x2_t) -> float32x2x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -73542,7 +73542,7 @@ pub fn vzip_s32(a: int32x2_t, b: int32x2_t) -> int32x2x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vtrn))] @@ -73571,7 +73571,7 @@ pub fn vzip_u32(a: uint32x2_t, b: uint32x2_t) -> uint32x2x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), 
assert_instr(vzip))] @@ -73600,7 +73600,7 @@ pub fn vzip_s8(a: int8x8_t, b: int8x8_t) -> int8x8x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] @@ -73629,7 +73629,7 @@ pub fn vzip_s16(a: int16x4_t, b: int16x4_t) -> int16x4x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] @@ -73658,7 +73658,7 @@ pub fn vzip_u8(a: uint8x8_t, b: uint8x8_t) -> uint8x8x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] @@ -73687,7 +73687,7 @@ pub fn vzip_u16(a: uint16x4_t, b: uint16x4_t) -> uint16x4x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] @@ -73716,7 +73716,7 @@ pub fn vzip_p8(a: poly8x8_t, b: poly8x8_t) -> poly8x8x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzip_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", 
target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vzip))] @@ -73745,7 +73745,7 @@ pub fn vzip_p16(a: poly16x4_t, b: poly16x4_t) -> poly16x4x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_f32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -73774,7 +73774,7 @@ pub fn vzipq_f32(a: float32x4_t, b: float32x4_t) -> float32x4x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -73811,7 +73811,7 @@ pub fn vzipq_s8(a: int8x16_t, b: int8x16_t) -> int8x16x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -73840,7 +73840,7 @@ pub fn vzipq_s16(a: int16x8_t, b: int16x8_t) -> int16x8x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_s32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -73869,7 +73869,7 @@ pub fn vzipq_s32(a: int32x4_t, b: int32x4_t) -> int32x4x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u8)"] 
-#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -73906,7 +73906,7 @@ pub fn vzipq_u8(a: uint8x16_t, b: uint8x16_t) -> uint8x16x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -73935,7 +73935,7 @@ pub fn vzipq_u16(a: uint16x8_t, b: uint16x8_t) -> uint16x8x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_u32)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -73964,7 +73964,7 @@ pub fn vzipq_u32(a: uint32x4_t, b: uint32x4_t) -> uint32x4x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p8)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] @@ -74001,7 +74001,7 @@ pub fn vzipq_p8(a: poly8x16_t, b: poly8x16_t) -> poly8x16x2_t { } #[doc = "Zip vectors"] #[doc = "[Arm's documentation](https://developer.arm.com/architectures/instruction-sets/intrinsics/vzipq_p16)"] -#[inline(always)] +#[inline] #[target_feature(enable = "neon")] #[cfg_attr(target_arch = "arm", target_feature(enable = "v7"))] #[cfg_attr(all(test, target_arch = "arm"), assert_instr(vorr))] diff --git a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs index 
ab80b499b05f0..dc467fd307810 100644 --- a/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs +++ b/library/stdarch/crates/stdarch-gen-arm/src/intrinsic.rs @@ -1741,7 +1741,7 @@ fn create_tokens(intrinsic: &Intrinsic, endianness: Endianness, tokens: &mut Tok ); } - tokens.append_all(quote! { #[inline(always)] }); + tokens.append_all(quote! { #[inline] }); match endianness { Endianness::Little => tokens.append_all(quote! { #[cfg(target_endian = "little")] }), From 6169e94e9bf3d1584746922df6f3702482d32cc4 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Wed, 6 May 2026 16:28:26 +0100 Subject: [PATCH 27/30] Remove #[inline(always)] from hexagon intrinsic generator & re-generate intrinsics --- .../crates/core_arch/src/hexagon/v128.rs | 952 +++++++++--------- .../crates/core_arch/src/hexagon/v64.rs | 952 +++++++++--------- .../crates/stdarch-gen-hexagon/src/main.rs | 4 +- 3 files changed, 954 insertions(+), 954 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/hexagon/v128.rs b/library/stdarch/crates/core_arch/src/hexagon/v128.rs index 10263382938b6..1f0566af78ef3 100644 --- a/library/stdarch/crates/core_arch/src/hexagon/v128.rs +++ b/library/stdarch/crates/core_arch/src/hexagon/v128.rs @@ -1043,7 +1043,7 @@ unsafe extern "unadjusted" { /// /// Instruction Type: LD /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(extractw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1055,7 +1055,7 @@ pub unsafe fn Q6_R_vextract_VR(vu: HvxVector, rs: i32) -> i32 { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(hi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1067,7 +1067,7 @@ pub unsafe fn Q6_V_hi_W(vss: HvxVectorPair) -> HvxVector { /// /// Instruction Type: CVI_VA /// 
Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(lo))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1079,7 +1079,7 @@ pub unsafe fn Q6_V_lo_W(vss: HvxVectorPair) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(lvsplatw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1091,7 +1091,7 @@ pub unsafe fn Q6_V_vsplat_R(rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1103,7 +1103,7 @@ pub unsafe fn Q6_Vuh_vabsdiff_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1115,7 +1115,7 @@ pub unsafe fn Q6_Vub_vabsdiff_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1127,7 +1127,7 @@ pub unsafe fn Q6_Vuh_vabsdiff_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffw))] #[unstable(feature = "stdarch_hexagon", 
issue = "151523")] @@ -1139,7 +1139,7 @@ pub unsafe fn Q6_Vuw_vabsdiff_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1151,7 +1151,7 @@ pub unsafe fn Q6_Vh_vabs_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1163,7 +1163,7 @@ pub unsafe fn Q6_Vh_vabs_Vh_sat(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1175,7 +1175,7 @@ pub unsafe fn Q6_Vw_vabs_Vw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsw_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1187,7 +1187,7 @@ pub unsafe fn Q6_Vw_vabs_Vw_sat(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1199,7 +1199,7 @@ pub unsafe fn Q6_Vb_vadd_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1211,7 +1211,7 @@ pub unsafe fn Q6_Wb_vadd_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1223,7 +1223,7 @@ pub unsafe fn Q6_Vh_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddh_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1235,7 +1235,7 @@ pub unsafe fn Q6_Wh_vadd_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1247,7 +1247,7 @@ pub unsafe fn Q6_Vh_vadd_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1259,7 +1259,7 @@ pub unsafe fn Q6_Wh_vadd_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ 
-1271,7 +1271,7 @@ pub unsafe fn Q6_Ww_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1283,7 +1283,7 @@ pub unsafe fn Q6_Wh_vadd_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1295,7 +1295,7 @@ pub unsafe fn Q6_Vub_vadd_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1307,7 +1307,7 @@ pub unsafe fn Q6_Wub_vadd_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1319,7 +1319,7 @@ pub unsafe fn Q6_Vuh_vadd_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1331,7 +1331,7 @@ pub unsafe fn Q6_Wuh_vadd_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VX_DV /// 
Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1343,7 +1343,7 @@ pub unsafe fn Q6_Ww_vadd_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1355,7 +1355,7 @@ pub unsafe fn Q6_Vw_vadd_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddw_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1367,7 +1367,7 @@ pub unsafe fn Q6_Ww_vadd_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1379,7 +1379,7 @@ pub unsafe fn Q6_Vw_vadd_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1391,7 +1391,7 @@ pub unsafe fn Q6_Ww_vadd_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(valignb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1403,7 +1403,7 @@ pub unsafe fn Q6_V_valign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVecto /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(valignbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1415,7 +1415,7 @@ pub unsafe fn Q6_V_valign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vand))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1427,7 +1427,7 @@ pub unsafe fn Q6_V_vand_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1439,7 +1439,7 @@ pub unsafe fn Q6_Vh_vasl_VhR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1451,7 +1451,7 @@ pub unsafe fn Q6_Vh_vasl_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1463,7 +1463,7 @@ pub unsafe fn Q6_Vw_vasl_VwR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction 
Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1475,7 +1475,7 @@ pub unsafe fn Q6_Vw_vaslacc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1487,7 +1487,7 @@ pub unsafe fn Q6_Vw_vasl_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1499,7 +1499,7 @@ pub unsafe fn Q6_Vh_vasr_VhR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhbrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1511,7 +1511,7 @@ pub unsafe fn Q6_Vb_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1523,7 +1523,7 @@ pub unsafe fn Q6_Vub_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vasrhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1535,7 +1535,7 @@ pub unsafe fn Q6_Vub_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1547,7 +1547,7 @@ pub unsafe fn Q6_Vh_vasr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1559,7 +1559,7 @@ pub unsafe fn Q6_Vw_vasr_VwR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1571,7 +1571,7 @@ pub unsafe fn Q6_Vw_vasracc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1583,7 +1583,7 @@ pub unsafe fn Q6_Vh_vasr_VwVwR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVect /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1595,7 +1595,7 @@ pub unsafe fn Q6_Vh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: 
HvxVector, rt: i32) -> /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1607,7 +1607,7 @@ pub unsafe fn Q6_Vh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hvx /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1619,7 +1619,7 @@ pub unsafe fn Q6_Vuh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1631,7 +1631,7 @@ pub unsafe fn Q6_Vw_vasr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vassign))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1643,7 +1643,7 @@ pub unsafe fn Q6_V_equals_V(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vassignp))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1655,7 +1655,7 @@ pub unsafe fn Q6_W_equals_W(vuu: HvxVectorPair) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
#[cfg_attr(test, assert_instr(vavgh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1667,7 +1667,7 @@ pub unsafe fn Q6_Vh_vavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavghrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1679,7 +1679,7 @@ pub unsafe fn Q6_Vh_vavg_VhVh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1691,7 +1691,7 @@ pub unsafe fn Q6_Vub_vavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgubrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1703,7 +1703,7 @@ pub unsafe fn Q6_Vub_vavg_VubVub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavguh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1715,7 +1715,7 @@ pub unsafe fn Q6_Vuh_vavg_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavguhrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1727,7 +1727,7 @@ pub unsafe fn Q6_Vuh_vavg_VuhVuh_rnd(vu: HvxVector, 
vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1739,7 +1739,7 @@ pub unsafe fn Q6_Vw_vavg_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgwrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1751,7 +1751,7 @@ pub unsafe fn Q6_Vw_vavg_VwVw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcl0h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1763,7 +1763,7 @@ pub unsafe fn Q6_Vuh_vcl0_Vuh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcl0w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1775,7 +1775,7 @@ pub unsafe fn Q6_Vuw_vcl0_Vuw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcombine))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1787,7 +1787,7 @@ pub unsafe fn Q6_W_vcombine_VV(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vd0))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1799,7 +1799,7 @@ pub unsafe fn Q6_V_vzero() -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1811,7 +1811,7 @@ pub unsafe fn Q6_Vb_vdeal_Vb(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealb4w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1823,7 +1823,7 @@ pub unsafe fn Q6_Vb_vdeale_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1835,7 +1835,7 @@ pub unsafe fn Q6_Vh_vdeal_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealvdd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1847,7 +1847,7 @@ pub unsafe fn Q6_W_vdeal_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdelta))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1859,7 +1859,7 @@ pub unsafe fn Q6_V_vdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 
-#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1871,7 +1871,7 @@ pub unsafe fn Q6_Vh_vdmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1883,7 +1883,7 @@ pub unsafe fn Q6_Vh_vdmpyacc_VhVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> H /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1895,7 +1895,7 @@ pub unsafe fn Q6_Wh_vdmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_dv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1911,7 +1911,7 @@ pub unsafe fn Q6_Wh_vdmpyacc_WhWubRb( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1923,7 +1923,7 @@ pub unsafe fn Q6_Vw_vdmpy_VhRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ 
-1935,7 +1935,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1947,7 +1947,7 @@ pub unsafe fn Q6_Ww_vdmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_dv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1963,7 +1963,7 @@ pub unsafe fn Q6_Ww_vdmpyacc_WwWhRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhisat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1975,7 +1975,7 @@ pub unsafe fn Q6_Vw_vdmpy_WhRh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhisat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1987,7 +1987,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwWhRh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1999,7 +1999,7 @@ pub unsafe fn Q6_Vw_vdmpy_VhRh_sat(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2011,7 +2011,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwVhRh_sat(vx: HvxVector, vu: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsuisat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2023,7 +2023,7 @@ pub unsafe fn Q6_Vw_vdmpy_WhRuh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2035,7 +2035,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwWhRuh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsusat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2047,7 +2047,7 @@ pub unsafe fn Q6_Vw_vdmpy_VhRuh_sat(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsusat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2059,7 +2059,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwVhRuh_sat(vx: HvxVector, vu: HvxVector, rt: i32) /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhvsat))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] @@ -2071,7 +2071,7 @@ pub unsafe fn Q6_Vw_vdmpy_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhvsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2083,7 +2083,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwVhVh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVec /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdsaduh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2095,7 +2095,7 @@ pub unsafe fn Q6_Wuw_vdsad_WuhRuh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdsaduh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2111,7 +2111,7 @@ pub unsafe fn Q6_Wuw_vdsadacc_WuwWuhRuh( /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vinsertwr))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2123,7 +2123,7 @@ pub unsafe fn Q6_Vw_vinsert_VwR(vx: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlalignb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2135,7 +2135,7 @@ pub unsafe fn Q6_V_vlalign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVect /// /// Instruction Type: CVI_VP /// Execution 
Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlalignbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2147,7 +2147,7 @@ pub unsafe fn Q6_V_vlalign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVec /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2159,7 +2159,7 @@ pub unsafe fn Q6_Vuh_vlsr_VuhR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2171,7 +2171,7 @@ pub unsafe fn Q6_Vh_vlsr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2183,7 +2183,7 @@ pub unsafe fn Q6_Vuw_vlsr_VuwR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2195,7 +2195,7 @@ pub unsafe fn Q6_Vw_vlsr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvvb))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] @@ -2207,7 +2207,7 @@ pub unsafe fn Q6_Vb_vlut32_VbVbR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVe /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvvb_oracc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2224,7 +2224,7 @@ pub unsafe fn Q6_Vb_vlut32or_VbVbVbR( /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2236,7 +2236,7 @@ pub unsafe fn Q6_Wh_vlut16_VbVhR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVe /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvwh_oracc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2253,7 +2253,7 @@ pub unsafe fn Q6_Wh_vlut16or_WhVbVhR( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2265,7 +2265,7 @@ pub unsafe fn Q6_Vh_vmax_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2277,7 +2277,7 @@ pub unsafe fn Q6_Vub_vmax_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2289,7 +2289,7 @@ pub unsafe fn Q6_Vuh_vmax_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2301,7 +2301,7 @@ pub unsafe fn Q6_Vw_vmax_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2313,7 +2313,7 @@ pub unsafe fn Q6_Vh_vmin_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2325,7 +2325,7 @@ pub unsafe fn Q6_Vub_vmin_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2337,7 +2337,7 @@ pub unsafe fn Q6_Vuh_vmin_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2349,7 
+2349,7 @@ pub unsafe fn Q6_Vw_vmin_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2361,7 +2361,7 @@ pub unsafe fn Q6_Wh_vmpa_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2377,7 +2377,7 @@ pub unsafe fn Q6_Wh_vmpaacc_WhWubRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2389,7 +2389,7 @@ pub unsafe fn Q6_Wh_vmpa_WubWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVec /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabuuv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2401,7 +2401,7 @@ pub unsafe fn Q6_Wh_vmpa_WubWub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVe /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpahb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2413,7 +2413,7 @@ pub unsafe fn Q6_Ww_vmpa_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpahb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2429,7 +2429,7 @@ pub unsafe fn Q6_Ww_vmpaacc_WwWhRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2441,7 +2441,7 @@ pub unsafe fn Q6_Wh_vmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2453,7 +2453,7 @@ pub unsafe fn Q6_Wh_vmpyacc_WhVubRb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2465,7 +2465,7 @@ pub unsafe fn Q6_Wh_vmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybusv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2481,7 +2481,7 @@ pub unsafe fn Q6_Wh_vmpyacc_WhVubVb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2493,7 +2493,7 @@ pub unsafe fn Q6_Wh_vmpy_VbVb(vu: HvxVector, 
vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2509,7 +2509,7 @@ pub unsafe fn Q6_Wh_vmpyacc_WhVbVb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyewuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2521,7 +2521,7 @@ pub unsafe fn Q6_Vw_vmpye_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2533,7 +2533,7 @@ pub unsafe fn Q6_Ww_vmpy_VhRh(vu: HvxVector, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2549,7 +2549,7 @@ pub unsafe fn Q6_Ww_vmpyacc_WwVhRh_sat( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhsrs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2561,7 +2561,7 @@ pub unsafe fn Q6_Vh_vmpy_VhRh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhss))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2573,7 +2573,7 @@ pub unsafe fn Q6_Vh_vmpy_VhRh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2585,7 +2585,7 @@ pub unsafe fn Q6_Ww_vmpy_VhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2601,7 +2601,7 @@ pub unsafe fn Q6_Ww_vmpyacc_WwVhVuh( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2613,7 +2613,7 @@ pub unsafe fn Q6_Ww_vmpy_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2629,7 +2629,7 @@ pub unsafe fn Q6_Ww_vmpyacc_WwVhVh( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhvsrs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2641,7 +2641,7 @@ pub unsafe fn Q6_Vh_vmpy_VhVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVec /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] 
+#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyieoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2653,7 +2653,7 @@ pub unsafe fn Q6_Vw_vmpyieo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2665,7 +2665,7 @@ pub unsafe fn Q6_Vw_vmpyieacc_VwVwVh(vx: HvxVector, vu: HvxVector, vv: HvxVector /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2677,7 +2677,7 @@ pub unsafe fn Q6_Vw_vmpyie_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewuh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2689,7 +2689,7 @@ pub unsafe fn Q6_Vw_vmpyieacc_VwVwVuh(vx: HvxVector, vu: HvxVector, vv: HvxVecto /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyih))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2701,7 +2701,7 @@ pub unsafe fn Q6_Vh_vmpyi_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyih_acc))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] @@ -2713,7 +2713,7 @@ pub unsafe fn Q6_Vh_vmpyiacc_VhVhVh(vx: HvxVector, vu: HvxVector, vv: HvxVector) /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyihb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2725,7 +2725,7 @@ pub unsafe fn Q6_Vh_vmpyi_VhRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyihb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2737,7 +2737,7 @@ pub unsafe fn Q6_Vh_vmpyiacc_VhVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiowh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2749,7 +2749,7 @@ pub unsafe fn Q6_Vw_vmpyio_VwVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2761,7 +2761,7 @@ pub unsafe fn Q6_Vw_vmpyi_VwRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2773,7 +2773,7 @@ pub unsafe fn Q6_Vw_vmpyiacc_VwVwRb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VX_DV /// 
Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2785,7 +2785,7 @@ pub unsafe fn Q6_Vw_vmpyi_VwRh(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2797,7 +2797,7 @@ pub unsafe fn Q6_Vw_vmpyiacc_VwVwRh(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2809,7 +2809,7 @@ pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_rnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2821,7 +2821,7 @@ pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVe /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2837,7 +2837,7 @@ pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_sacc))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2853,7 +2853,7 @@ pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2865,7 +2865,7 @@ pub unsafe fn Q6_Wuh_vmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2881,7 +2881,7 @@ pub unsafe fn Q6_Wuh_vmpyacc_WuhVubRub( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyubv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2893,7 +2893,7 @@ pub unsafe fn Q6_Wuh_vmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyubv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2909,7 +2909,7 @@ pub unsafe fn Q6_Wuh_vmpyacc_WuhVubVub( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2921,7 +2921,7 @@ pub unsafe fn Q6_Wuw_vmpy_VuhRuh(vu: HvxVector, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2937,7 +2937,7 @@ pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhRuh( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2949,7 +2949,7 @@ pub unsafe fn Q6_Wuw_vmpy_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuhv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2965,7 +2965,7 @@ pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhVuh( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2977,7 +2977,7 @@ pub unsafe fn Q6_Vh_vnavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2989,7 +2989,7 @@ pub unsafe fn Q6_Vb_vnavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3001,7 +3001,7 @@ pub unsafe fn Q6_Vw_vnavg_VwVw(vu: HvxVector, vv: 
HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnormamth))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3013,7 +3013,7 @@ pub unsafe fn Q6_Vh_vnormamt_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnormamtw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3025,7 +3025,7 @@ pub unsafe fn Q6_Vw_vnormamt_Vw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnot))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3037,7 +3037,7 @@ pub unsafe fn Q6_V_vnot_V(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vor))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3049,7 +3049,7 @@ pub unsafe fn Q6_V_vor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3061,7 +3061,7 @@ pub unsafe fn Q6_Vb_vpacke_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackeh))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3073,7 +3073,7 @@ pub unsafe fn Q6_Vh_vpacke_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackhb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3085,7 +3085,7 @@ pub unsafe fn Q6_Vb_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackhub_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3097,7 +3097,7 @@ pub unsafe fn Q6_Vub_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3109,7 +3109,7 @@ pub unsafe fn Q6_Vb_vpacko_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3121,7 +3121,7 @@ pub unsafe fn Q6_Vh_vpacko_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackwh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3133,7 +3133,7 @@ pub unsafe fn Q6_Vh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector 
{ /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackwuh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3145,7 +3145,7 @@ pub unsafe fn Q6_Vuh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpopcounth))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3157,7 +3157,7 @@ pub unsafe fn Q6_Vh_vpopcount_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrdelta))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3169,7 +3169,7 @@ pub unsafe fn Q6_V_vrdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3181,7 +3181,7 @@ pub unsafe fn Q6_Vw_vrmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3193,7 +3193,7 @@ pub unsafe fn Q6_Vw_vrmpyacc_VwVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> H /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
#[cfg_attr(test, assert_instr(vrmpybusi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3205,7 +3205,7 @@ pub unsafe fn Q6_Ww_vrmpy_WubRbI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVe /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3222,7 +3222,7 @@ pub unsafe fn Q6_Ww_vrmpyacc_WwWubRbI( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3234,7 +3234,7 @@ pub unsafe fn Q6_Vw_vrmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3246,7 +3246,7 @@ pub unsafe fn Q6_Vw_vrmpyacc_VwVubVb(vx: HvxVector, vu: HvxVector, vv: HvxVector /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3258,7 +3258,7 @@ pub unsafe fn Q6_Vw_vrmpy_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3270,7 +3270,7 @@ pub unsafe fn Q6_Vw_vrmpyacc_VwVbVb(vx: HvxVector, vu: HvxVector, vv: 
HvxVector) /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3282,7 +3282,7 @@ pub unsafe fn Q6_Vuw_vrmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3294,7 +3294,7 @@ pub unsafe fn Q6_Vuw_vrmpyacc_VuwVubRub(vx: HvxVector, vu: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3306,7 +3306,7 @@ pub unsafe fn Q6_Wuw_vrmpy_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3323,7 +3323,7 @@ pub unsafe fn Q6_Wuw_vrmpyacc_WuwWubRubI( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3335,7 +3335,7 @@ pub unsafe fn Q6_Vuw_vrmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vrmpyubv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3347,7 +3347,7 @@ pub unsafe fn Q6_Vuw_vrmpyacc_VuwVubVub(vx: HvxVector, vu: HvxVector, vv: HvxVec /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vror))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3359,7 +3359,7 @@ pub unsafe fn Q6_V_vror_VR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3371,7 +3371,7 @@ pub unsafe fn Q6_Vb_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundhub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3383,7 +3383,7 @@ pub unsafe fn Q6_Vub_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3395,7 +3395,7 @@ pub unsafe fn Q6_Vh_vround_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3407,7 +3407,7 @@ pub unsafe fn Q6_Vuh_vround_VwVw_sat(vu: HvxVector, vv: 
HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrsadubi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3419,7 +3419,7 @@ pub unsafe fn Q6_Wuw_vrsad_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrsadubi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3436,7 +3436,7 @@ pub unsafe fn Q6_Wuw_vrsadacc_WuwWubRubI( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsathub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3448,7 +3448,7 @@ pub unsafe fn Q6_Vub_vsat_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsatwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3460,7 +3460,7 @@ pub unsafe fn Q6_Vh_vsat_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3472,7 +3472,7 @@ pub unsafe fn Q6_Wh_vsxt_Vb(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vsh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3484,7 +3484,7 @@ pub unsafe fn Q6_Ww_vsxt_Vh(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3496,7 +3496,7 @@ pub unsafe fn Q6_Vh_vshuffe_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3508,7 +3508,7 @@ pub unsafe fn Q6_Vb_vshuff_Vb(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3520,7 +3520,7 @@ pub unsafe fn Q6_Vb_vshuffe_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3532,7 +3532,7 @@ pub unsafe fn Q6_Vh_vshuff_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3544,7 +3544,7 @@ pub unsafe fn Q6_Vb_vshuffo_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP_VS /// 
Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffvdd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3556,7 +3556,7 @@ pub unsafe fn Q6_W_vshuff_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVecto /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3568,7 +3568,7 @@ pub unsafe fn Q6_Wb_vshuffoe_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3580,7 +3580,7 @@ pub unsafe fn Q6_Wh_vshuffoe_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3592,7 +3592,7 @@ pub unsafe fn Q6_Vh_vshuffo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3604,7 +3604,7 @@ pub unsafe fn Q6_Vb_vsub_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vsubb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3616,7 +3616,7 @@ pub unsafe fn Q6_Wb_vsub_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3628,7 +3628,7 @@ pub unsafe fn Q6_Vh_vsub_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubh_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3640,7 +3640,7 @@ pub unsafe fn Q6_Wh_vsub_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3652,7 +3652,7 @@ pub unsafe fn Q6_Vh_vsub_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3664,7 +3664,7 @@ pub unsafe fn Q6_Wh_vsub_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3676,7 +3676,7 @@ pub unsafe fn Q6_Ww_vsub_VhVh(vu: HvxVector, vv: 
HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3688,7 +3688,7 @@ pub unsafe fn Q6_Wh_vsub_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3700,7 +3700,7 @@ pub unsafe fn Q6_Vub_vsub_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3712,7 +3712,7 @@ pub unsafe fn Q6_Wub_vsub_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3724,7 +3724,7 @@ pub unsafe fn Q6_Vuh_vsub_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3736,7 +3736,7 @@ pub unsafe fn Q6_Wuh_vsub_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3748,7 +3748,7 @@ pub unsafe fn Q6_Ww_vsub_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3760,7 +3760,7 @@ pub unsafe fn Q6_Vw_vsub_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubw_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3772,7 +3772,7 @@ pub unsafe fn Q6_Ww_vsub_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3784,7 +3784,7 @@ pub unsafe fn Q6_Vw_vsub_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3796,7 +3796,7 @@ pub unsafe fn Q6_Ww_vsub_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyb))] #[unstable(feature = "stdarch_hexagon", 
issue = "151523")] @@ -3808,7 +3808,7 @@ pub unsafe fn Q6_Wh_vtmpy_WbRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3824,7 +3824,7 @@ pub unsafe fn Q6_Wh_vtmpyacc_WhWbRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3836,7 +3836,7 @@ pub unsafe fn Q6_Wh_vtmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3852,7 +3852,7 @@ pub unsafe fn Q6_Wh_vtmpyacc_WhWubRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3864,7 +3864,7 @@ pub unsafe fn Q6_Ww_vtmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3880,7 +3880,7 @@ pub unsafe fn Q6_Ww_vtmpyacc_WwWhRb( /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv60"))] #[cfg_attr(test, assert_instr(vunpackb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3892,7 +3892,7 @@ pub unsafe fn Q6_Wh_vunpack_Vb(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3904,7 +3904,7 @@ pub unsafe fn Q6_Ww_vunpack_Vh(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3916,7 +3916,7 @@ pub unsafe fn Q6_Wh_vunpackoor_WhVb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVec /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3928,7 +3928,7 @@ pub unsafe fn Q6_Ww_vunpackoor_WwVh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVec /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3940,7 +3940,7 @@ pub unsafe fn Q6_Wuh_vunpack_Vub(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3952,7 +3952,7 @@ pub unsafe fn Q6_Wuw_vunpack_Vuh(vu: 
HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vxor))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3964,7 +3964,7 @@ pub unsafe fn Q6_V_vxor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vzb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3976,7 +3976,7 @@ pub unsafe fn Q6_Wuh_vzxt_Vub(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vzh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3988,7 +3988,7 @@ pub unsafe fn Q6_Wuw_vzxt_Vuh(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(lvsplatb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4000,7 +4000,7 @@ pub unsafe fn Q6_Vb_vsplat_R(rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(lvsplath))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4012,7 +4012,7 @@ pub unsafe fn Q6_Vh_vsplat_R(rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddbsat))] #[unstable(feature 
= "stdarch_hexagon", issue = "151523")] @@ -4024,7 +4024,7 @@ pub unsafe fn Q6_Vb_vadd_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddbsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4036,7 +4036,7 @@ pub unsafe fn Q6_Wb_vadd_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddclbh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4048,7 +4048,7 @@ pub unsafe fn Q6_Vh_vadd_vclb_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddclbw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4060,7 +4060,7 @@ pub unsafe fn Q6_Vw_vadd_vclb_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddhw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4076,7 +4076,7 @@ pub unsafe fn Q6_Ww_vaddacc_WwVhVh( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddubh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4092,7 +4092,7 @@ pub unsafe fn Q6_Wh_vaddacc_WhVubVub( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] 
+#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddububb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4104,7 +4104,7 @@ pub unsafe fn Q6_Vub_vadd_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduhw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4120,7 +4120,7 @@ pub unsafe fn Q6_Ww_vaddacc_WwVuhVuh( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4132,7 +4132,7 @@ pub unsafe fn Q6_Vuw_vadd_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4144,7 +4144,7 @@ pub unsafe fn Q6_Wuw_vadd_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasrhbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4156,7 +4156,7 @@ pub unsafe fn Q6_Vb_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hvx /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasruwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = 
"151523")] @@ -4168,7 +4168,7 @@ pub unsafe fn Q6_Vuh_vasr_VuwVuwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasrwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4180,7 +4180,7 @@ pub unsafe fn Q6_Vuh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlsrb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4192,7 +4192,7 @@ pub unsafe fn Q6_Vub_vlsr_VubR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvb_nm))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4204,7 +4204,7 @@ pub unsafe fn Q6_Vb_vlut32_VbVbR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvb_oracci))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4221,7 +4221,7 @@ pub unsafe fn Q6_Vb_vlut32or_VbVbVbI( /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4233,7 +4233,7 @@ pub unsafe fn Q6_Vb_vlut32_VbVbI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxV /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 
-#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwh_nm))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4245,7 +4245,7 @@ pub unsafe fn Q6_Wh_vlut16_VbVhR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwh_oracci))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4262,7 +4262,7 @@ pub unsafe fn Q6_Wh_vlut16or_WhVbVhI( /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwhi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4274,7 +4274,7 @@ pub unsafe fn Q6_Wh_vlut16_VbVhI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxV /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmaxb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4286,7 +4286,7 @@ pub unsafe fn Q6_Vb_vmax_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vminb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4298,7 +4298,7 @@ pub unsafe fn Q6_Vb_vmin_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpauhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] 
@@ -4310,7 +4310,7 @@ pub unsafe fn Q6_Ww_vmpa_WuhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpauhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4326,7 +4326,7 @@ pub unsafe fn Q6_Ww_vmpaacc_WwWuhRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyewuh_64))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4338,7 +4338,7 @@ pub unsafe fn Q6_W_vmpye_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyiwub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4350,7 +4350,7 @@ pub unsafe fn Q6_Vw_vmpyi_VwRub(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyiwub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4362,7 +4362,7 @@ pub unsafe fn Q6_Vw_vmpyiacc_VwVwRub(vx: HvxVector, vu: HvxVector, rt: i32) -> H /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyowh_64_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4378,7 +4378,7 @@ pub unsafe fn Q6_W_vmpyoacc_WVwVh( /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vrounduhub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4390,7 +4390,7 @@ pub unsafe fn Q6_Vub_vround_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vrounduwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4402,7 +4402,7 @@ pub unsafe fn Q6_Vuh_vround_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsatuwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4414,7 +4414,7 @@ pub unsafe fn Q6_Vuh_vsat_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4426,7 +4426,7 @@ pub unsafe fn Q6_Vb_vsub_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubbsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4438,7 +4438,7 @@ pub unsafe fn Q6_Wb_vsub_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubububb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ 
-4450,7 +4450,7 @@ pub unsafe fn Q6_Vub_vsub_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubuwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4462,7 +4462,7 @@ pub unsafe fn Q6_Vuw_vsub_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubuwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4474,7 +4474,7 @@ pub unsafe fn Q6_Wuw_vsub_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vabsb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4486,7 +4486,7 @@ pub unsafe fn Q6_Vb_vabs_Vb(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vabsb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4498,7 +4498,7 @@ pub unsafe fn Q6_Vb_vabs_Vb_sat(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vaslh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4510,7 +4510,7 @@ pub unsafe fn Q6_Vh_vaslacc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] 
+#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasrh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4522,7 +4522,7 @@ pub unsafe fn Q6_Vh_vasracc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4534,7 +4534,7 @@ pub unsafe fn Q6_Vub_vasr_VuhVuhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4546,7 +4546,7 @@ pub unsafe fn Q6_Vub_vasr_VuhVuhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4558,7 +4558,7 @@ pub unsafe fn Q6_Vuh_vasr_VuwVuwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavgb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4570,7 +4570,7 @@ pub unsafe fn Q6_Vb_vavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavgbrnd))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] @@ -4582,7 +4582,7 @@ pub unsafe fn Q6_Vb_vavg_VbVb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavguw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4594,7 +4594,7 @@ pub unsafe fn Q6_Vuw_vavg_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavguwrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4606,7 +4606,7 @@ pub unsafe fn Q6_Vuw_vavg_VuwVuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: MAPPING /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vdd0))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4618,7 +4618,7 @@ pub unsafe fn Q6_W_vzero() -> HvxVectorPair { /// /// Instruction Type: CVI_GATHER /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4630,7 +4630,7 @@ pub unsafe fn Q6_vgather_ARMVh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVect /// /// Instruction Type: CVI_GATHER_DV /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4642,7 +4642,7 @@ pub unsafe fn Q6_vgather_ARMWw(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVec /// /// Instruction Type: CVI_GATHER /// 
Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4654,7 +4654,7 @@ pub unsafe fn Q6_vgather_ARMVw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVect /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpabuu))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4666,7 +4666,7 @@ pub unsafe fn Q6_Wh_vmpa_WubRub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpabuu_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4682,7 +4682,7 @@ pub unsafe fn Q6_Wh_vmpaacc_WhWubRub( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4694,7 +4694,7 @@ pub unsafe fn Q6_Ww_vmpyacc_WwVhRh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyuhe))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4706,7 +4706,7 @@ pub unsafe fn Q6_Vuw_vmpye_VuhRuh(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyuhe_acc))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] @@ -4718,7 +4718,7 @@ pub unsafe fn Q6_Vuw_vmpyeacc_VuwVuhRuh(vx: HvxVector, vu: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vnavgb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4730,7 +4730,7 @@ pub unsafe fn Q6_Vb_vnavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4742,7 +4742,7 @@ pub unsafe fn Q6_vscatter_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) /// /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermh_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4754,7 +4754,7 @@ pub unsafe fn Q6_vscatteracc_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVecto /// /// Instruction Type: CVI_SCATTER_DV /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4766,7 +4766,7 @@ pub unsafe fn Q6_vscatter_RMWwV(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVec /// /// Instruction Type: CVI_SCATTER_DV /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermhw_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4778,7 +4778,7 @@ pub unsafe fn Q6_vscatteracc_RMWwV(rt: i32, mu: i32, vvv: 
HvxVectorPair, vw: Hvx /// /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4790,7 +4790,7 @@ pub unsafe fn Q6_vscatter_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) /// /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermw_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4802,7 +4802,7 @@ pub unsafe fn Q6_vscatteracc_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVecto /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vasr_into))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4818,7 +4818,7 @@ pub unsafe fn Q6_Ww_vasrinto_WwVwVw( /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vrotr))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4830,7 +4830,7 @@ pub unsafe fn Q6_Vuw_vrotr_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vsatdw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4842,7 +4842,7 @@ pub unsafe fn Q6_Vw_vsatdw_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] 
#[cfg_attr(test, assert_instr(v6mpyhubs10))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4858,7 +4858,7 @@ pub unsafe fn Q6_Ww_v6mpy_WubWbI_h( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyhubs10_vxx))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4875,7 +4875,7 @@ pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_h( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyvubs10))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4891,7 +4891,7 @@ pub unsafe fn Q6_Ww_v6mpy_WubWbI_v( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4908,7 +4908,7 @@ pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_v( /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vabs_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4920,7 +4920,7 @@ pub unsafe fn Q6_Vhf_vabs_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vabs_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4932,7 +4932,7 @@ pub unsafe fn Q6_Vsf_vabs_Vsf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4944,7 +4944,7 @@ pub unsafe fn Q6_Vqf16_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4956,7 +4956,7 @@ pub unsafe fn Q6_Vhf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4968,7 +4968,7 @@ pub unsafe fn Q6_Vqf16_vadd_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf16_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4980,7 +4980,7 @@ pub unsafe fn Q6_Vqf16_vadd_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4992,7 +4992,7 @@ pub unsafe fn Q6_Vqf32_vadd_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf32_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5004,7 
+5004,7 @@ pub unsafe fn Q6_Vqf32_vadd_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5016,7 +5016,7 @@ pub unsafe fn Q6_Vqf32_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5028,7 +5028,7 @@ pub unsafe fn Q6_Wsf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5040,7 +5040,7 @@ pub unsafe fn Q6_Vsf_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vassign_fp))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5052,7 +5052,7 @@ pub unsafe fn Q6_Vw_vfmv_Vw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_hf_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5064,7 +5064,7 @@ pub unsafe fn Q6_Vhf_equals_Vqf16(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_hf_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5076,7 +5076,7 @@ pub unsafe fn Q6_Vhf_equals_Wqf32(vuu: HvxVectorPair) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_sf_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5088,7 +5088,7 @@ pub unsafe fn Q6_Vsf_equals_Vqf32(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_b_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5100,7 +5100,7 @@ pub unsafe fn Q6_Vb_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_h_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5112,7 +5112,7 @@ pub unsafe fn Q6_Vh_vcvt_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_b))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5124,7 +5124,7 @@ pub unsafe fn Q6_Whf_vcvt_Vb(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5136,7 +5136,7 @@ pub unsafe fn 
Q6_Vhf_vcvt_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5148,7 +5148,7 @@ pub unsafe fn Q6_Vhf_vcvt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_ub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5160,7 +5160,7 @@ pub unsafe fn Q6_Whf_vcvt_Vub(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_uh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5172,7 +5172,7 @@ pub unsafe fn Q6_Vhf_vcvt_Vuh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5184,7 +5184,7 @@ pub unsafe fn Q6_Wsf_vcvt_Vhf(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_ub_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5196,7 +5196,7 @@ pub unsafe fn Q6_Vub_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] 
#[cfg_attr(test, assert_instr(vcvt_uh_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5208,7 +5208,7 @@ pub unsafe fn Q6_Vuh_vcvt_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vdmpy_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5220,7 +5220,7 @@ pub unsafe fn Q6_Vsf_vdmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5232,7 +5232,7 @@ pub unsafe fn Q6_Vsf_vdmpyacc_VsfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVec /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmax_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5244,7 +5244,7 @@ pub unsafe fn Q6_Vhf_vfmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmax_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5256,7 +5256,7 @@ pub unsafe fn Q6_Vsf_vfmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmin_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5268,7 +5268,7 @@ pub unsafe fn Q6_Vhf_vfmin_VhfVhf(vu: 
HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmin_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5280,7 +5280,7 @@ pub unsafe fn Q6_Vsf_vfmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfneg_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5292,7 +5292,7 @@ pub unsafe fn Q6_Vhf_vfneg_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfneg_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5304,7 +5304,7 @@ pub unsafe fn Q6_Vsf_vfneg_Vsf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmax_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5316,7 +5316,7 @@ pub unsafe fn Q6_Vhf_vmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmax_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5328,7 +5328,7 @@ pub unsafe fn Q6_Vsf_vmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv68"))] #[cfg_attr(test, assert_instr(vmin_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5340,7 +5340,7 @@ pub unsafe fn Q6_Vhf_vmin_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmin_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5352,7 +5352,7 @@ pub unsafe fn Q6_Vsf_vmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5364,7 +5364,7 @@ pub unsafe fn Q6_Vhf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_hf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5376,7 +5376,7 @@ pub unsafe fn Q6_Vhf_vmpyacc_VhfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVect /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5388,7 +5388,7 @@ pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5400,7 +5400,7 @@ pub unsafe fn 
Q6_Vqf16_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16_mix_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5412,7 +5412,7 @@ pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5424,7 +5424,7 @@ pub unsafe fn Q6_Vqf32_vmpy_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5436,7 +5436,7 @@ pub unsafe fn Q6_Wqf32_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPai /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5448,7 +5448,7 @@ pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorP /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5460,7 +5460,7 @@ pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VX_DV /// Execution Slots: 
SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5472,7 +5472,7 @@ pub unsafe fn Q6_Vqf32_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5484,7 +5484,7 @@ pub unsafe fn Q6_Wsf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5500,7 +5500,7 @@ pub unsafe fn Q6_Wsf_vmpyacc_WsfVhfVhf( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5512,7 +5512,7 @@ pub unsafe fn Q6_Vsf_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5524,7 +5524,7 @@ pub unsafe fn Q6_Vqf16_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_hf_hf))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] @@ -5536,7 +5536,7 @@ pub unsafe fn Q6_Vhf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5548,7 +5548,7 @@ pub unsafe fn Q6_Vqf16_vsub_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf16_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5560,7 +5560,7 @@ pub unsafe fn Q6_Vqf16_vsub_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5572,7 +5572,7 @@ pub unsafe fn Q6_Vqf32_vsub_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf32_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5584,7 +5584,7 @@ pub unsafe fn Q6_Vqf32_vsub_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5596,7 +5596,7 @@ pub unsafe fn Q6_Vqf32_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// 
Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5608,7 +5608,7 @@ pub unsafe fn Q6_Wsf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5620,7 +5620,7 @@ pub unsafe fn Q6_Vsf_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvuhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5632,7 +5632,7 @@ pub unsafe fn Q6_Vub_vasr_WuhVub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> H /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvuhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5644,7 +5644,7 @@ pub unsafe fn Q6_Vub_vasr_WuhVub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVe /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5656,7 +5656,7 @@ pub unsafe fn Q6_Vuh_vasr_WwVuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> Hv /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5668,7 +5668,7 @@ pub unsafe fn Q6_Vuh_vasr_WwVuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVec /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vmpyuhvs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5680,7 +5680,7 @@ pub unsafe fn Q6_Vuh_vmpy_VuhVuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_h_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5692,7 +5692,7 @@ pub unsafe fn Q6_Vh_equals_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_hf_h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5704,7 +5704,7 @@ pub unsafe fn Q6_Vhf_equals_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_sf_w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5716,7 +5716,7 @@ pub unsafe fn Q6_Vsf_equals_Vw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_w_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5728,7 +5728,7 @@ pub unsafe fn Q6_Vw_equals_Vsf(vu: HvxVector) 
-> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(get_qfext))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5740,7 +5740,7 @@ pub unsafe fn Q6_V_vgetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(set_qfext))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5752,7 +5752,7 @@ pub unsafe fn Q6_V_vsetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vabs_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5764,7 +5764,7 @@ pub unsafe fn Q6_V_vabs_V(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt2_hf_b))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5776,7 +5776,7 @@ pub unsafe fn Q6_Whf_vcvt2_Vb(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt2_hf_ub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5788,7 +5788,7 @@ pub unsafe fn Q6_Whf_vcvt2_Vub(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt_hf_f8))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5800,7 +5800,7 @@ pub unsafe fn Q6_Whf_vcvt_V(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfmax_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5812,7 +5812,7 @@ pub unsafe fn Q6_V_vfmax_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfmin_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5824,7 +5824,7 @@ pub unsafe fn Q6_V_vfmin_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfneg_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5837,7 +5837,7 @@ pub unsafe fn Q6_V_vfneg_V(vu: HvxVector) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_and_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5855,7 +5855,7 @@ pub unsafe fn Q6_Q_and_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_and_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5873,7 +5873,7 @@ pub unsafe fn Q6_Q_and_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPre /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_not_Q(qs: HvxVectorPred) -> HvxVectorPred { @@ -5891,7 +5891,7 @@ pub unsafe fn Q6_Q_not_Q(qs: HvxVectorPred) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_or_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5909,7 +5909,7 @@ pub unsafe fn Q6_Q_or_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_or_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5927,7 +5927,7 @@ pub unsafe fn Q6_Q_or_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vsetq_R(rt: i32) -> HvxVectorPred { @@ -5939,7 +5939,7 @@ pub unsafe fn Q6_Q_vsetq_R(rt: i32) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_xor_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5957,7 +5957,7 @@ pub unsafe fn Q6_Q_xor_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VM_ST /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vmem_QnRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { @@ -5973,7 +5973,7 @@ pub unsafe fn Q6_vmem_QnRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VM_ST /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vmem_QnRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { @@ -5989,7 +5989,7 @@ pub unsafe fn Q6_vmem_QnRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVec /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VM_ST /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vmem_QRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { @@ -6005,7 +6005,7 @@ pub unsafe fn Q6_vmem_QRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VM_ST /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vmem_QRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { @@ -6021,7 +6021,7 @@ pub unsafe fn Q6_vmem_QRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_condacc_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6037,7 +6037,7 @@ pub unsafe fn Q6_Vb_condacc_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_condacc_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6053,7 +6053,7 @@ pub unsafe fn Q6_Vb_condacc_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_condacc_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6069,7 +6069,7 @@ pub unsafe fn Q6_Vh_condacc_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_condacc_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6085,7 +6085,7 @@ pub unsafe fn Q6_Vh_condacc_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_condacc_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6101,7 +6101,7 @@ pub unsafe fn Q6_Vw_condacc_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_condacc_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6117,7 +6117,7 @@ pub unsafe fn Q6_Vw_condacc_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vand_QR(qu: HvxVectorPred, rt: i32) -> HvxVector { @@ -6129,7 +6129,7 @@ pub unsafe fn Q6_V_vand_QR(qu: HvxVectorPred, rt: i32) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vandor_VQR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { @@ -6141,7 +6141,7 @@ pub unsafe fn Q6_V_vandor_VQR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxV /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vand_VR(vu: HvxVector, rt: i32) -> HvxVectorPred { @@ -6153,7 +6153,7 @@ pub unsafe fn Q6_Q_vand_VR(vu: HvxVector, rt: i32) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vandor_QVR(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred { @@ -6169,7 +6169,7 @@ pub unsafe fn Q6_Q_vandor_QVR(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxV /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eq_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6181,7 +6181,7 @@ pub unsafe fn Q6_Q_vcmp_eq_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqand_QVbVb( @@ -6204,7 +6204,7 @@ pub unsafe fn Q6_Q_vcmp_eqand_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqor_QVbVb( @@ -6227,7 +6227,7 @@ pub unsafe fn Q6_Q_vcmp_eqor_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqxacc_QVbVb( @@ -6250,7 +6250,7 @@ pub unsafe fn Q6_Q_vcmp_eqxacc_QVbVb( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eq_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6262,7 +6262,7 @@ pub unsafe fn Q6_Q_vcmp_eq_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqand_QVhVh( @@ -6285,7 +6285,7 @@ pub unsafe fn Q6_Q_vcmp_eqand_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqor_QVhVh( @@ -6308,7 +6308,7 @@ pub unsafe fn Q6_Q_vcmp_eqor_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqxacc_QVhVh( @@ -6331,7 +6331,7 @@ pub unsafe fn Q6_Q_vcmp_eqxacc_QVhVh( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eq_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6343,7 +6343,7 @@ pub unsafe fn Q6_Q_vcmp_eq_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqand_QVwVw( @@ -6366,7 +6366,7 @@ pub unsafe fn Q6_Q_vcmp_eqand_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqor_QVwVw( @@ -6389,7 +6389,7 @@ pub unsafe fn Q6_Q_vcmp_eqor_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqxacc_QVwVw( @@ -6412,7 +6412,7 @@ pub unsafe fn Q6_Q_vcmp_eqxacc_QVwVw( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6424,7 +6424,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVbVb( @@ -6447,7 +6447,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVbVb( @@ -6470,7 +6470,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVbVb( @@ -6493,7 +6493,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVbVb( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6505,7 +6505,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVhVh( @@ -6528,7 +6528,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVhVh( @@ -6551,7 +6551,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVhVh( @@ -6574,7 +6574,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVhVh( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6586,7 +6586,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVubVub( @@ -6609,7 +6609,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVubVub( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVubVub( @@ -6632,7 +6632,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVubVub( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVubVub( @@ -6655,7 +6655,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVubVub( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6667,7 +6667,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVuhVuh( @@ -6690,7 +6690,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVuhVuh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVuhVuh( @@ -6713,7 +6713,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVuhVuh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVuhVuh( @@ -6736,7 +6736,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVuhVuh( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6748,7 +6748,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVuwVuw( @@ -6771,7 +6771,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVuwVuw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVuwVuw( @@ -6794,7 +6794,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVuwVuw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVuwVuw( @@ -6817,7 +6817,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVuwVuw( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6829,7 +6829,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVwVw( @@ -6852,7 +6852,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVwVw( @@ -6875,7 +6875,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVwVw( @@ -6898,7 +6898,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVwVw( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vmux_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector { @@ -6914,7 +6914,7 @@ pub unsafe fn Q6_V_vmux_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_condnac_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6930,7 +6930,7 @@ pub unsafe fn Q6_Vb_condnac_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_condnac_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6946,7 +6946,7 @@ pub unsafe fn Q6_Vb_condnac_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_condnac_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6962,7 +6962,7 @@ pub unsafe fn Q6_Vh_condnac_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_condnac_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6978,7 +6978,7 @@ pub unsafe fn Q6_Vh_condnac_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_condnac_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6994,7 +6994,7 @@ pub unsafe fn Q6_Vw_condnac_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_condnac_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -7010,7 +7010,7 @@ pub unsafe fn Q6_Vw_condnac_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_W_vswap_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair { @@ -7026,7 +7026,7 @@ pub unsafe fn Q6_W_vswap_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vsetq2_R(rt: i32) -> HvxVectorPred { @@ -7038,7 +7038,7 @@ pub unsafe fn Q6_Q_vsetq2_R(rt: i32) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Qb_vshuffe_QhQh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -7056,7 +7056,7 @@ pub unsafe fn Q6_Qb_vshuffe_QhQh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVec /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Qh_vshuffe_QwQw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -7074,7 +7074,7 @@ pub unsafe fn Q6_Qh_vshuffe_QwQw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVec /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vand_QnR(qu: HvxVectorPred, rt: i32) -> HvxVector { @@ -7089,7 +7089,7 @@ pub unsafe fn Q6_V_vand_QnR(qu: HvxVectorPred, rt: i32) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vandor_VQnR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { @@ -7105,7 +7105,7 @@ pub unsafe fn Q6_V_vandor_VQnR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> Hvx /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vand_QnV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { @@ -7120,7 +7120,7 @@ pub unsafe fn Q6_V_vand_QnV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vand_QV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { @@ -7135,7 +7135,7 @@ pub unsafe fn Q6_V_vand_QV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_GATHER /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vgather_AQRMVh( @@ -7159,7 +7159,7 @@ pub unsafe fn Q6_vgather_AQRMVh( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_GATHER_DV /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vgather_AQRMWw( @@ -7183,7 +7183,7 @@ pub unsafe fn Q6_vgather_AQRMWw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_GATHER /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vgather_AQRMVw( @@ -7207,7 +7207,7 @@ pub unsafe fn Q6_vgather_AQRMVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { @@ -7222,7 +7222,7 @@ pub unsafe fn Q6_Vb_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { @@ -7237,7 +7237,7 @@ pub unsafe fn Q6_Vh_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { @@ -7252,7 +7252,7 @@ pub unsafe fn Q6_Vw_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vscatter_QRMVhV( @@ -7276,7 +7276,7 @@ pub unsafe fn Q6_vscatter_QRMVhV( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_SCATTER_DV /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vscatter_QRMWwV( @@ -7300,7 +7300,7 @@ pub unsafe fn Q6_vscatter_QRMWwV( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vscatter_QRMVwV( @@ -7324,7 +7324,7 @@ pub unsafe fn Q6_vscatter_QRMVwV( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_vadd_VwVwQ_carry_sat( @@ -7344,7 +7344,7 @@ pub unsafe fn Q6_Vw_vadd_VwVwQ_carry_sat( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -7356,7 +7356,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVhfVhf( @@ -7379,7 +7379,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVhfVhf( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVhfVhf( @@ -7402,7 +7402,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVhfVhf( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVhfVhf( @@ -7425,7 +7425,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVhfVhf( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -7437,7 +7437,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVsfVsf( @@ -7460,7 +7460,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVsfVsf( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVsfVsf( @@ -7483,7 +7483,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVsfVsf( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVsfVsf( diff --git a/library/stdarch/crates/core_arch/src/hexagon/v64.rs b/library/stdarch/crates/core_arch/src/hexagon/v64.rs index 517a807db4ae2..e9b18b2fd8efe 100644 --- a/library/stdarch/crates/core_arch/src/hexagon/v64.rs +++ b/library/stdarch/crates/core_arch/src/hexagon/v64.rs @@ -1043,7 +1043,7 @@ unsafe extern "unadjusted" { /// /// Instruction Type: LD /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(extractw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1055,7 +1055,7 @@ pub unsafe fn Q6_R_vextract_VR(vu: HvxVector, rs: i32) -> i32 { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(hi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1067,7 +1067,7 @@ pub unsafe fn Q6_V_hi_W(vss: HvxVectorPair) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(lo))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1079,7 +1079,7 @@ pub unsafe fn Q6_V_lo_W(vss: HvxVectorPair) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(lvsplatw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1091,7 +1091,7 @@ pub unsafe fn Q6_V_vsplat_R(rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// 
Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1103,7 +1103,7 @@ pub unsafe fn Q6_Vuh_vabsdiff_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1115,7 +1115,7 @@ pub unsafe fn Q6_Vub_vabsdiff_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1127,7 +1127,7 @@ pub unsafe fn Q6_Vuh_vabsdiff_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsdiffw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1139,7 +1139,7 @@ pub unsafe fn Q6_Vuw_vabsdiff_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1151,7 +1151,7 @@ pub unsafe fn Q6_Vh_vabs_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsh_sat))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1163,7 +1163,7 @@ pub unsafe fn Q6_Vh_vabs_Vh_sat(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1175,7 +1175,7 @@ pub unsafe fn Q6_Vw_vabs_Vw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vabsw_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1187,7 +1187,7 @@ pub unsafe fn Q6_Vw_vabs_Vw_sat(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1199,7 +1199,7 @@ pub unsafe fn Q6_Vb_vadd_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1211,7 +1211,7 @@ pub unsafe fn Q6_Wb_vadd_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1223,7 +1223,7 @@ pub unsafe fn Q6_Vh_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 
-#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddh_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1235,7 +1235,7 @@ pub unsafe fn Q6_Wh_vadd_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1247,7 +1247,7 @@ pub unsafe fn Q6_Vh_vadd_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1259,7 +1259,7 @@ pub unsafe fn Q6_Wh_vadd_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1271,7 +1271,7 @@ pub unsafe fn Q6_Ww_vadd_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1283,7 +1283,7 @@ pub unsafe fn Q6_Wh_vadd_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubsat))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1295,7 +1295,7 @@ pub unsafe fn Q6_Vub_vadd_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddubsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1307,7 +1307,7 @@ pub unsafe fn Q6_Wub_vadd_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1319,7 +1319,7 @@ pub unsafe fn Q6_Vuh_vadd_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1331,7 +1331,7 @@ pub unsafe fn Q6_Wuh_vadd_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vadduhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1343,7 +1343,7 @@ pub unsafe fn Q6_Ww_vadd_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1355,7 +1355,7 @@ pub unsafe fn Q6_Vw_vadd_VwVw(vu: HvxVector, vv: HvxVector) -> 
HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddw_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1367,7 +1367,7 @@ pub unsafe fn Q6_Ww_vadd_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1379,7 +1379,7 @@ pub unsafe fn Q6_Vw_vadd_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaddwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1391,7 +1391,7 @@ pub unsafe fn Q6_Ww_vadd_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(valignb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1403,7 +1403,7 @@ pub unsafe fn Q6_V_valign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVecto /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(valignbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1415,7 +1415,7 @@ pub unsafe fn Q6_V_valign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vand))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1427,7 +1427,7 @@ pub unsafe fn Q6_V_vand_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1439,7 +1439,7 @@ pub unsafe fn Q6_Vh_vasl_VhR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1451,7 +1451,7 @@ pub unsafe fn Q6_Vh_vasl_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1463,7 +1463,7 @@ pub unsafe fn Q6_Vw_vasl_VwR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1475,7 +1475,7 @@ pub unsafe fn Q6_Vw_vaslacc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vaslwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1487,7 +1487,7 @@ pub unsafe fn Q6_Vw_vasl_VwVw(vu: HvxVector, vv: 
HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1499,7 +1499,7 @@ pub unsafe fn Q6_Vh_vasr_VhR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhbrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1511,7 +1511,7 @@ pub unsafe fn Q6_Vb_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1523,7 +1523,7 @@ pub unsafe fn Q6_Vub_vasr_VhVhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1535,7 +1535,7 @@ pub unsafe fn Q6_Vub_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1547,7 +1547,7 @@ pub unsafe fn Q6_Vh_vasr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1559,7 +1559,7 @@ pub unsafe fn Q6_Vw_vasr_VwR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1571,7 +1571,7 @@ pub unsafe fn Q6_Vw_vasracc_VwVwR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1583,7 +1583,7 @@ pub unsafe fn Q6_Vh_vasr_VwVwR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVect /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1595,7 +1595,7 @@ pub unsafe fn Q6_Vh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1607,7 +1607,7 @@ pub unsafe fn Q6_Vh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hvx /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1619,7 +1619,7 @@ pub 
unsafe fn Q6_Vuh_vasr_VwVwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vasrwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1631,7 +1631,7 @@ pub unsafe fn Q6_Vw_vasr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vassign))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1643,7 +1643,7 @@ pub unsafe fn Q6_V_equals_V(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vassignp))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1655,7 +1655,7 @@ pub unsafe fn Q6_W_equals_W(vuu: HvxVectorPair) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1667,7 +1667,7 @@ pub unsafe fn Q6_Vh_vavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavghrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1679,7 +1679,7 @@ pub unsafe fn Q6_Vh_vavg_VhVh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1691,7 +1691,7 @@ pub unsafe fn Q6_Vub_vavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgubrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1703,7 +1703,7 @@ pub unsafe fn Q6_Vub_vavg_VubVub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavguh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1715,7 +1715,7 @@ pub unsafe fn Q6_Vuh_vavg_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavguhrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1727,7 +1727,7 @@ pub unsafe fn Q6_Vuh_vavg_VuhVuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1739,7 +1739,7 @@ pub unsafe fn Q6_Vw_vavg_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vavgwrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1751,7 +1751,7 @@ pub unsafe fn 
Q6_Vw_vavg_VwVw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcl0h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1763,7 +1763,7 @@ pub unsafe fn Q6_Vuh_vcl0_Vuh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcl0w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1775,7 +1775,7 @@ pub unsafe fn Q6_Vuw_vcl0_Vuw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vcombine))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1787,7 +1787,7 @@ pub unsafe fn Q6_W_vcombine_VV(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vd0))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1799,7 +1799,7 @@ pub unsafe fn Q6_V_vzero() -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1811,7 +1811,7 @@ pub unsafe fn Q6_Vb_vdeal_Vb(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vdealb4w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1823,7 +1823,7 @@ pub unsafe fn Q6_Vb_vdeale_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1835,7 +1835,7 @@ pub unsafe fn Q6_Vh_vdeal_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdealvdd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1847,7 +1847,7 @@ pub unsafe fn Q6_W_vdeal_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdelta))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1859,7 +1859,7 @@ pub unsafe fn Q6_V_vdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1871,7 +1871,7 @@ pub unsafe fn Q6_Vh_vdmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1883,7 +1883,7 @@ pub unsafe fn Q6_Vh_vdmpyacc_VhVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> H /// /// 
Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1895,7 +1895,7 @@ pub unsafe fn Q6_Wh_vdmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpybus_dv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1911,7 +1911,7 @@ pub unsafe fn Q6_Wh_vdmpyacc_WhWubRb( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1923,7 +1923,7 @@ pub unsafe fn Q6_Vw_vdmpy_VhRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1935,7 +1935,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1947,7 +1947,7 @@ pub unsafe fn Q6_Ww_vdmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vdmpyhb_dv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1963,7 +1963,7 @@ pub unsafe fn Q6_Ww_vdmpyacc_WwWhRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhisat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1975,7 +1975,7 @@ pub unsafe fn Q6_Vw_vdmpy_WhRh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhisat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1987,7 +1987,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwWhRh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -1999,7 +1999,7 @@ pub unsafe fn Q6_Vw_vdmpy_VhRh_sat(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2011,7 +2011,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwVhRh_sat(vx: HvxVector, vu: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsuisat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2023,7 +2023,7 @@ pub unsafe fn Q6_Vw_vdmpy_WhRuh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector 
{ /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2035,7 +2035,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwWhRuh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsusat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2047,7 +2047,7 @@ pub unsafe fn Q6_Vw_vdmpy_VhRuh_sat(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhsusat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2059,7 +2059,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwVhRuh_sat(vx: HvxVector, vu: HvxVector, rt: i32) /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhvsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2071,7 +2071,7 @@ pub unsafe fn Q6_Vw_vdmpy_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdmpyhvsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2083,7 +2083,7 @@ pub unsafe fn Q6_Vw_vdmpyacc_VwVhVh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVec /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdsaduh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2095,7 +2095,7 @@ pub unsafe fn Q6_Wuw_vdsad_WuhRuh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vdsaduh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2111,7 +2111,7 @@ pub unsafe fn Q6_Wuw_vdsadacc_WuwWuhRuh( /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vinsertwr))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2123,7 +2123,7 @@ pub unsafe fn Q6_Vw_vinsert_VwR(vx: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlalignb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2135,7 +2135,7 @@ pub unsafe fn Q6_V_vlalign_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVect /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlalignbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2147,7 +2147,7 @@ pub unsafe fn Q6_V_vlalign_VVI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVec /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2159,7 +2159,7 @@ pub unsafe fn Q6_Vuh_vlsr_VuhR(vu: 
HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2171,7 +2171,7 @@ pub unsafe fn Q6_Vh_vlsr_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2183,7 +2183,7 @@ pub unsafe fn Q6_Vuw_vlsr_VuwR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlsrwv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2195,7 +2195,7 @@ pub unsafe fn Q6_Vw_vlsr_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvvb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2207,7 +2207,7 @@ pub unsafe fn Q6_Vb_vlut32_VbVbR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVe /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvvb_oracc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2224,7 +2224,7 @@ pub unsafe fn Q6_Vb_vlut32or_VbVbVbR( /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, 
assert_instr(vlutvwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2236,7 +2236,7 @@ pub unsafe fn Q6_Wh_vlut16_VbVhR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVe /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vlutvwh_oracc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2253,7 +2253,7 @@ pub unsafe fn Q6_Wh_vlut16or_WhVbVhR( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2265,7 +2265,7 @@ pub unsafe fn Q6_Vh_vmax_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2277,7 +2277,7 @@ pub unsafe fn Q6_Vub_vmax_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2289,7 +2289,7 @@ pub unsafe fn Q6_Vuh_vmax_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmaxw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2301,7 +2301,7 @@ pub unsafe fn Q6_Vw_vmax_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: 
CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2313,7 +2313,7 @@ pub unsafe fn Q6_Vh_vmin_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2325,7 +2325,7 @@ pub unsafe fn Q6_Vub_vmin_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2337,7 +2337,7 @@ pub unsafe fn Q6_Vuh_vmin_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vminw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2349,7 +2349,7 @@ pub unsafe fn Q6_Vw_vmin_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2361,7 +2361,7 @@ pub unsafe fn Q6_Wh_vmpa_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabus_acc))] 
#[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2377,7 +2377,7 @@ pub unsafe fn Q6_Wh_vmpaacc_WhWubRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2389,7 +2389,7 @@ pub unsafe fn Q6_Wh_vmpa_WubWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVec /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpabuuv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2401,7 +2401,7 @@ pub unsafe fn Q6_Wh_vmpa_WubWub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVe /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpahb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2413,7 +2413,7 @@ pub unsafe fn Q6_Ww_vmpa_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpahb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2429,7 +2429,7 @@ pub unsafe fn Q6_Ww_vmpaacc_WwWhRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2441,7 +2441,7 @@ pub unsafe fn Q6_Wh_vmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] 
+#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2453,7 +2453,7 @@ pub unsafe fn Q6_Wh_vmpyacc_WhVubRb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2465,7 +2465,7 @@ pub unsafe fn Q6_Wh_vmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybusv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2481,7 +2481,7 @@ pub unsafe fn Q6_Wh_vmpyacc_WhVubVb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2493,7 +2493,7 @@ pub unsafe fn Q6_Wh_vmpy_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpybv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2509,7 +2509,7 @@ pub unsafe fn Q6_Wh_vmpyacc_WhVbVb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyewuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2521,7 +2521,7 @@ pub unsafe fn 
Q6_Vw_vmpye_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2533,7 +2533,7 @@ pub unsafe fn Q6_Ww_vmpy_VhRh(vu: HvxVector, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhsat_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2549,7 +2549,7 @@ pub unsafe fn Q6_Ww_vmpyacc_WwVhRh_sat( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhsrs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2561,7 +2561,7 @@ pub unsafe fn Q6_Vh_vmpy_VhRh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhss))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2573,7 +2573,7 @@ pub unsafe fn Q6_Vh_vmpy_VhRh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2585,7 +2585,7 @@ pub unsafe fn Q6_Ww_vmpy_VhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2601,7 +2601,7 @@ pub unsafe fn Q6_Ww_vmpyacc_WwVhVuh( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2613,7 +2613,7 @@ pub unsafe fn Q6_Ww_vmpy_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2629,7 +2629,7 @@ pub unsafe fn Q6_Ww_vmpyacc_WwVhVh( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyhvsrs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2641,7 +2641,7 @@ pub unsafe fn Q6_Vh_vmpy_VhVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVec /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyieoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2653,7 +2653,7 @@ pub unsafe fn Q6_Vw_vmpyieo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2665,7 +2665,7 @@ pub unsafe fn Q6_Vw_vmpyieacc_VwVwVh(vx: HvxVector, vu: HvxVector, vv: HvxVector 
/// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2677,7 +2677,7 @@ pub unsafe fn Q6_Vw_vmpyie_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiewuh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2689,7 +2689,7 @@ pub unsafe fn Q6_Vw_vmpyieacc_VwVwVuh(vx: HvxVector, vu: HvxVector, vv: HvxVecto /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyih))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2701,7 +2701,7 @@ pub unsafe fn Q6_Vh_vmpyi_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyih_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2713,7 +2713,7 @@ pub unsafe fn Q6_Vh_vmpyiacc_VhVhVh(vx: HvxVector, vu: HvxVector, vv: HvxVector) /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyihb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2725,7 +2725,7 @@ pub unsafe fn Q6_Vh_vmpyi_VhRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
#[cfg_attr(test, assert_instr(vmpyihb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2737,7 +2737,7 @@ pub unsafe fn Q6_Vh_vmpyiacc_VhVhRb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiowh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2749,7 +2749,7 @@ pub unsafe fn Q6_Vw_vmpyio_VwVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2761,7 +2761,7 @@ pub unsafe fn Q6_Vw_vmpyi_VwRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2773,7 +2773,7 @@ pub unsafe fn Q6_Vw_vmpyiacc_VwVwRb(vx: HvxVector, vu: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2785,7 +2785,7 @@ pub unsafe fn Q6_Vw_vmpyi_VwRh(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyiwh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2797,7 +2797,7 @@ pub unsafe fn Q6_Vw_vmpyiacc_VwVwRh(vx: HvxVector, 
vu: HvxVector, rt: i32) -> Hv /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2809,7 +2809,7 @@ pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_rnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2821,7 +2821,7 @@ pub unsafe fn Q6_Vw_vmpyo_VwVh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVe /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2837,7 +2837,7 @@ pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyowh_sacc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2853,7 +2853,7 @@ pub unsafe fn Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2865,7 +2865,7 @@ pub unsafe fn Q6_Wuh_vmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
#[cfg_attr(test, assert_instr(vmpyub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2881,7 +2881,7 @@ pub unsafe fn Q6_Wuh_vmpyacc_WuhVubRub( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyubv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2893,7 +2893,7 @@ pub unsafe fn Q6_Wuh_vmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyubv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2909,7 +2909,7 @@ pub unsafe fn Q6_Wuh_vmpyacc_WuhVubVub( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2921,7 +2921,7 @@ pub unsafe fn Q6_Wuw_vmpy_VuhRuh(vu: HvxVector, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2937,7 +2937,7 @@ pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhRuh( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuhv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2949,7 +2949,7 @@ pub unsafe fn Q6_Wuw_vmpy_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 
-#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vmpyuhv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2965,7 +2965,7 @@ pub unsafe fn Q6_Wuw_vmpyacc_WuwVuhVuh( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2977,7 +2977,7 @@ pub unsafe fn Q6_Vh_vnavg_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -2989,7 +2989,7 @@ pub unsafe fn Q6_Vb_vnavg_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnavgw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3001,7 +3001,7 @@ pub unsafe fn Q6_Vw_vnavg_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnormamth))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3013,7 +3013,7 @@ pub unsafe fn Q6_Vh_vnormamt_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnormamtw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3025,7 +3025,7 @@ 
pub unsafe fn Q6_Vw_vnormamt_Vw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vnot))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3037,7 +3037,7 @@ pub unsafe fn Q6_V_vnot_V(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vor))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3049,7 +3049,7 @@ pub unsafe fn Q6_V_vor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3061,7 +3061,7 @@ pub unsafe fn Q6_Vb_vpacke_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3073,7 +3073,7 @@ pub unsafe fn Q6_Vh_vpacke_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackhb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3085,7 +3085,7 @@ pub unsafe fn Q6_Vb_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackhub_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3097,7 +3097,7 @@ pub unsafe fn Q6_Vub_vpack_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3109,7 +3109,7 @@ pub unsafe fn Q6_Vb_vpacko_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3121,7 +3121,7 @@ pub unsafe fn Q6_Vh_vpacko_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackwh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3133,7 +3133,7 @@ pub unsafe fn Q6_Vh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpackwuh_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3145,7 +3145,7 @@ pub unsafe fn Q6_Vuh_vpack_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vpopcounth))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3157,7 
+3157,7 @@ pub unsafe fn Q6_Vh_vpopcount_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrdelta))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3169,7 +3169,7 @@ pub unsafe fn Q6_V_vrdelta_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybus))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3181,7 +3181,7 @@ pub unsafe fn Q6_Vw_vrmpy_VubRb(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3193,7 +3193,7 @@ pub unsafe fn Q6_Vw_vrmpyacc_VwVubRb(vx: HvxVector, vu: HvxVector, rt: i32) -> H /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3205,7 +3205,7 @@ pub unsafe fn Q6_Ww_vrmpy_WubRbI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVe /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3222,7 +3222,7 @@ pub unsafe fn Q6_Ww_vrmpyacc_WwWubRbI( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3234,7 +3234,7 @@ pub unsafe fn Q6_Vw_vrmpy_VubVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybusv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3246,7 +3246,7 @@ pub unsafe fn Q6_Vw_vrmpyacc_VwVubVb(vx: HvxVector, vu: HvxVector, vv: HvxVector /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3258,7 +3258,7 @@ pub unsafe fn Q6_Vw_vrmpy_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpybv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3270,7 +3270,7 @@ pub unsafe fn Q6_Vw_vrmpyacc_VwVbVb(vx: HvxVector, vu: HvxVector, vv: HvxVector) /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3282,7 +3282,7 @@ pub unsafe fn Q6_Vuw_vrmpy_VubRub(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3294,7 +3294,7 @@ pub unsafe 
fn Q6_Vuw_vrmpyacc_VuwVubRub(vx: HvxVector, vu: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3306,7 +3306,7 @@ pub unsafe fn Q6_Wuw_vrmpy_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3323,7 +3323,7 @@ pub unsafe fn Q6_Wuw_vrmpyacc_WuwWubRubI( /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3335,7 +3335,7 @@ pub unsafe fn Q6_Vuw_vrmpy_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrmpyubv_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3347,7 +3347,7 @@ pub unsafe fn Q6_Vuw_vrmpyacc_VuwVubVub(vx: HvxVector, vu: HvxVector, vv: HvxVec /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vror))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3359,7 +3359,7 @@ pub unsafe fn Q6_V_vror_VR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3371,7 +3371,7 @@ pub unsafe fn Q6_Vb_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundhub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3383,7 +3383,7 @@ pub unsafe fn Q6_Vub_vround_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3395,7 +3395,7 @@ pub unsafe fn Q6_Vh_vround_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vroundwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3407,7 +3407,7 @@ pub unsafe fn Q6_Vuh_vround_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrsadubi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3419,7 +3419,7 @@ pub unsafe fn Q6_Wuw_vrsad_WubRubI(vuu: HvxVectorPair, rt: i32, iu1: i32) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vrsadubi_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3436,7 
+3436,7 @@ pub unsafe fn Q6_Wuw_vrsadacc_WuwWubRubI( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsathub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3448,7 +3448,7 @@ pub unsafe fn Q6_Vub_vsat_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsatwh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3460,7 +3460,7 @@ pub unsafe fn Q6_Vh_vsat_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3472,7 +3472,7 @@ pub unsafe fn Q6_Wh_vsxt_Vb(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3484,7 +3484,7 @@ pub unsafe fn Q6_Ww_vsxt_Vh(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3496,7 +3496,7 @@ pub unsafe fn Q6_Vh_vshuffe_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv60"))] #[cfg_attr(test, assert_instr(vshuffb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3508,7 +3508,7 @@ pub unsafe fn Q6_Vb_vshuff_Vb(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3520,7 +3520,7 @@ pub unsafe fn Q6_Vb_vshuffe_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3532,7 +3532,7 @@ pub unsafe fn Q6_Vh_vshuff_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3544,7 +3544,7 @@ pub unsafe fn Q6_Vb_vshuffo_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshuffvdd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3556,7 +3556,7 @@ pub unsafe fn Q6_W_vshuff_VVR(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVecto /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoeb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3568,7 +3568,7 @@ pub unsafe fn Q6_Wb_vshuffoe_VbVb(vu: HvxVector, vv: 
HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoeh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3580,7 +3580,7 @@ pub unsafe fn Q6_Wh_vshuffoe_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vshufoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3592,7 +3592,7 @@ pub unsafe fn Q6_Vh_vshuffo_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3604,7 +3604,7 @@ pub unsafe fn Q6_Vb_vsub_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubb_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3616,7 +3616,7 @@ pub unsafe fn Q6_Wb_vsub_WbWb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3628,7 +3628,7 @@ pub unsafe fn Q6_Vh_vsub_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubh_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3640,7 +3640,7 @@ pub unsafe fn Q6_Wh_vsub_WhWh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3652,7 +3652,7 @@ pub unsafe fn Q6_Vh_vsub_VhVh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3664,7 +3664,7 @@ pub unsafe fn Q6_Wh_vsub_WhWh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3676,7 +3676,7 @@ pub unsafe fn Q6_Ww_vsub_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3688,7 +3688,7 @@ pub unsafe fn Q6_Wh_vsub_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3700,7 
+3700,7 @@ pub unsafe fn Q6_Vub_vsub_VubVub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsububsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3712,7 +3712,7 @@ pub unsafe fn Q6_Wub_vsub_WubWub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3724,7 +3724,7 @@ pub unsafe fn Q6_Vuh_vsub_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3736,7 +3736,7 @@ pub unsafe fn Q6_Wuh_vsub_WuhWuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubuhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3748,7 +3748,7 @@ pub unsafe fn Q6_Ww_vsub_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3760,7 +3760,7 @@ pub unsafe fn Q6_Vw_vsub_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: 
SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubw_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3772,7 +3772,7 @@ pub unsafe fn Q6_Ww_vsub_WwWw(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVect /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3784,7 +3784,7 @@ pub unsafe fn Q6_Vw_vsub_VwVw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vsubwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3796,7 +3796,7 @@ pub unsafe fn Q6_Ww_vsub_WwWw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3808,7 +3808,7 @@ pub unsafe fn Q6_Wh_vtmpy_WbRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3824,7 +3824,7 @@ pub unsafe fn Q6_Wh_vtmpyacc_WhWbRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpybus))] #[unstable(feature = "stdarch_hexagon", 
issue = "151523")] @@ -3836,7 +3836,7 @@ pub unsafe fn Q6_Wh_vtmpy_WubRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpybus_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3852,7 +3852,7 @@ pub unsafe fn Q6_Wh_vtmpyacc_WhWubRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3864,7 +3864,7 @@ pub unsafe fn Q6_Ww_vtmpy_WhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vtmpyhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3880,7 +3880,7 @@ pub unsafe fn Q6_Ww_vtmpyacc_WwWhRb( /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3892,7 +3892,7 @@ pub unsafe fn Q6_Wh_vunpack_Vb(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3904,7 +3904,7 @@ pub unsafe fn Q6_Ww_vunpack_Vh(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackob))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3916,7 +3916,7 @@ pub unsafe fn Q6_Wh_vunpackoor_WhVb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVec /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackoh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3928,7 +3928,7 @@ pub unsafe fn Q6_Ww_vunpackoor_WwVh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVec /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3940,7 +3940,7 @@ pub unsafe fn Q6_Wuh_vunpack_Vub(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vunpackuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3952,7 +3952,7 @@ pub unsafe fn Q6_Wuw_vunpack_Vuh(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vxor))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3964,7 +3964,7 @@ pub unsafe fn Q6_V_vxor_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vzb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3976,7 +3976,7 @@ pub unsafe fn 
Q6_Wuh_vzxt_Vub(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[cfg_attr(test, assert_instr(vzh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -3988,7 +3988,7 @@ pub unsafe fn Q6_Wuw_vzxt_Vuh(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(lvsplatb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4000,7 +4000,7 @@ pub unsafe fn Q6_Vb_vsplat_R(rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(lvsplath))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4012,7 +4012,7 @@ pub unsafe fn Q6_Vh_vsplat_R(rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4024,7 +4024,7 @@ pub unsafe fn Q6_Vb_vadd_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddbsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4036,7 +4036,7 @@ pub unsafe fn Q6_Wb_vadd_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] 
#[cfg_attr(test, assert_instr(vaddclbh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4048,7 +4048,7 @@ pub unsafe fn Q6_Vh_vadd_vclb_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddclbw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4060,7 +4060,7 @@ pub unsafe fn Q6_Vw_vadd_vclb_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddhw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4076,7 +4076,7 @@ pub unsafe fn Q6_Ww_vaddacc_WwVhVh( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddubh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4092,7 +4092,7 @@ pub unsafe fn Q6_Wh_vaddacc_WhVubVub( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vaddububb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4104,7 +4104,7 @@ pub unsafe fn Q6_Vub_vadd_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduhw_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4120,7 +4120,7 @@ pub unsafe fn Q6_Ww_vaddacc_WwVuhVuh( /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 
-#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4132,7 +4132,7 @@ pub unsafe fn Q6_Vuw_vadd_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vadduwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4144,7 +4144,7 @@ pub unsafe fn Q6_Wuw_vadd_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasrhbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4156,7 +4156,7 @@ pub unsafe fn Q6_Vb_vasr_VhVhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> Hvx /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasruwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4168,7 +4168,7 @@ pub unsafe fn Q6_Vuh_vasr_VuwVuwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vasrwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4180,7 +4180,7 @@ pub unsafe fn Q6_Vuh_vasr_VwVwR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, 
assert_instr(vlsrb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4192,7 +4192,7 @@ pub unsafe fn Q6_Vub_vlsr_VubR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvb_nm))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4204,7 +4204,7 @@ pub unsafe fn Q6_Vb_vlut32_VbVbR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvb_oracci))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4221,7 +4221,7 @@ pub unsafe fn Q6_Vb_vlut32or_VbVbVbI( /// /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvvbi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4233,7 +4233,7 @@ pub unsafe fn Q6_Vb_vlut32_VbVbI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxV /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwh_nm))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4245,7 +4245,7 @@ pub unsafe fn Q6_Wh_vlut16_VbVhR_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwh_oracci))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4262,7 +4262,7 @@ pub unsafe fn Q6_Wh_vlut16or_WhVbVhI( /// /// Instruction Type: CVI_VP_VS /// 
Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vlutvwhi))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4274,7 +4274,7 @@ pub unsafe fn Q6_Wh_vlut16_VbVhI(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxV /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmaxb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4286,7 +4286,7 @@ pub unsafe fn Q6_Vb_vmax_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vminb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4298,7 +4298,7 @@ pub unsafe fn Q6_Vb_vmin_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpauhb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4310,7 +4310,7 @@ pub unsafe fn Q6_Ww_vmpa_WuhRb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpauhb_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4326,7 +4326,7 @@ pub unsafe fn Q6_Ww_vmpaacc_WwWuhRb( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyewuh_64))] #[unstable(feature = "stdarch_hexagon", 
issue = "151523")] @@ -4338,7 +4338,7 @@ pub unsafe fn Q6_W_vmpye_VwVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyiwub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4350,7 +4350,7 @@ pub unsafe fn Q6_Vw_vmpyi_VwRub(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyiwub_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4362,7 +4362,7 @@ pub unsafe fn Q6_Vw_vmpyiacc_VwVwRub(vx: HvxVector, vu: HvxVector, rt: i32) -> H /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vmpyowh_64_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4378,7 +4378,7 @@ pub unsafe fn Q6_W_vmpyoacc_WVwVh( /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vrounduhub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4390,7 +4390,7 @@ pub unsafe fn Q6_Vub_vround_VuhVuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vrounduwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4402,7 +4402,7 @@ pub unsafe fn Q6_Vuh_vround_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 
-#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsatuwuh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4414,7 +4414,7 @@ pub unsafe fn Q6_Vuh_vsat_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubbsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4426,7 +4426,7 @@ pub unsafe fn Q6_Vb_vsub_VbVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubbsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4438,7 +4438,7 @@ pub unsafe fn Q6_Wb_vsub_WbWb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> Hvx /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubububb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4450,7 +4450,7 @@ pub unsafe fn Q6_Vub_vsub_VubVb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, assert_instr(vsubuwsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4462,7 +4462,7 @@ pub unsafe fn Q6_Vuw_vsub_VuwVuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[cfg_attr(test, 
assert_instr(vsubuwsat_dv))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4474,7 +4474,7 @@ pub unsafe fn Q6_Wuw_vsub_WuwWuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vabsb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4486,7 +4486,7 @@ pub unsafe fn Q6_Vb_vabs_Vb(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vabsb_sat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4498,7 +4498,7 @@ pub unsafe fn Q6_Vb_vabs_Vb_sat(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vaslh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4510,7 +4510,7 @@ pub unsafe fn Q6_Vh_vaslacc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasrh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4522,7 +4522,7 @@ pub unsafe fn Q6_Vh_vasracc_VhVhR(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxV /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4534,7 +4534,7 @@ pub unsafe fn Q6_Vub_vasr_VuhVuhR_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) 
/// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4546,7 +4546,7 @@ pub unsafe fn Q6_Vub_vasr_VuhVuhR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vasruwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4558,7 +4558,7 @@ pub unsafe fn Q6_Vuh_vasr_VuwVuwR_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavgb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4570,7 +4570,7 @@ pub unsafe fn Q6_Vb_vavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavgbrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4582,7 +4582,7 @@ pub unsafe fn Q6_Vb_vavg_VbVb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vavguw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4594,7 +4594,7 @@ pub unsafe fn Q6_Vuw_vavg_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] 
#[cfg_attr(test, assert_instr(vavguwrnd))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4606,7 +4606,7 @@ pub unsafe fn Q6_Vuw_vavg_VuwVuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: MAPPING /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vdd0))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4618,7 +4618,7 @@ pub unsafe fn Q6_W_vzero() -> HvxVectorPair { /// /// Instruction Type: CVI_GATHER /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4630,7 +4630,7 @@ pub unsafe fn Q6_vgather_ARMVh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVect /// /// Instruction Type: CVI_GATHER_DV /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4642,7 +4642,7 @@ pub unsafe fn Q6_vgather_ARMWw(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVec /// /// Instruction Type: CVI_GATHER /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vgathermw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4654,7 +4654,7 @@ pub unsafe fn Q6_vgather_ARMVw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVect /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpabuu))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4666,7 +4666,7 @@ pub unsafe fn Q6_Wh_vmpa_WubRub(vuu: HvxVectorPair, rt: 
i32) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpabuu_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4682,7 +4682,7 @@ pub unsafe fn Q6_Wh_vmpaacc_WhWubRub( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyh_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4694,7 +4694,7 @@ pub unsafe fn Q6_Ww_vmpyacc_WwVhRh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyuhe))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4706,7 +4706,7 @@ pub unsafe fn Q6_Vuw_vmpye_VuhRuh(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vmpyuhe_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4718,7 +4718,7 @@ pub unsafe fn Q6_Vuw_vmpyeacc_VuwVuhRuh(vx: HvxVector, vu: HvxVector, rt: i32) - /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vnavgb))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4730,7 +4730,7 @@ pub unsafe fn Q6_Vb_vnavg_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, 
assert_instr(vscattermh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4742,7 +4742,7 @@ pub unsafe fn Q6_vscatter_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) /// /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermh_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4754,7 +4754,7 @@ pub unsafe fn Q6_vscatteracc_RMVhV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVecto /// /// Instruction Type: CVI_SCATTER_DV /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermhw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4766,7 +4766,7 @@ pub unsafe fn Q6_vscatter_RMWwV(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVec /// /// Instruction Type: CVI_SCATTER_DV /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermhw_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4778,7 +4778,7 @@ pub unsafe fn Q6_vscatteracc_RMWwV(rt: i32, mu: i32, vvv: HvxVectorPair, vw: Hvx /// /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4790,7 +4790,7 @@ pub unsafe fn Q6_vscatter_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) /// /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[cfg_attr(test, assert_instr(vscattermw_add))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4802,7 +4802,7 @@ pub 
unsafe fn Q6_vscatteracc_RMVwV(rt: i32, mu: i32, vv: HvxVector, vw: HvxVecto /// /// Instruction Type: CVI_VP_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vasr_into))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4818,7 +4818,7 @@ pub unsafe fn Q6_Ww_vasrinto_WwVwVw( /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vrotr))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4830,7 +4830,7 @@ pub unsafe fn Q6_Vuw_vrotr_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[cfg_attr(test, assert_instr(vsatdw))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4842,7 +4842,7 @@ pub unsafe fn Q6_Vw_vsatdw_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyhubs10))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4858,7 +4858,7 @@ pub unsafe fn Q6_Ww_v6mpy_WubWbI_h( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyhubs10_vxx))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4875,7 +4875,7 @@ pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_h( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, 
assert_instr(v6mpyvubs10))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4891,7 +4891,7 @@ pub unsafe fn Q6_Ww_v6mpy_WubWbI_v( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4908,7 +4908,7 @@ pub unsafe fn Q6_Ww_v6mpyacc_WwWubWbI_v( /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vabs_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4920,7 +4920,7 @@ pub unsafe fn Q6_Vhf_vabs_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vabs_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4932,7 +4932,7 @@ pub unsafe fn Q6_Vsf_vabs_Vsf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4944,7 +4944,7 @@ pub unsafe fn Q6_Vqf16_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4956,7 +4956,7 @@ pub unsafe fn Q6_Vhf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] 
+#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4968,7 +4968,7 @@ pub unsafe fn Q6_Vqf16_vadd_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf16_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4980,7 +4980,7 @@ pub unsafe fn Q6_Vqf16_vadd_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -4992,7 +4992,7 @@ pub unsafe fn Q6_Vqf32_vadd_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_qf32_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5004,7 +5004,7 @@ pub unsafe fn Q6_Vqf32_vadd_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5016,7 +5016,7 @@ pub unsafe fn Q6_Vqf32_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf_hf))] #[unstable(feature 
= "stdarch_hexagon", issue = "151523")] @@ -5028,7 +5028,7 @@ pub unsafe fn Q6_Wsf_vadd_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vadd_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5040,7 +5040,7 @@ pub unsafe fn Q6_Vsf_vadd_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vassign_fp))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5052,7 +5052,7 @@ pub unsafe fn Q6_Vw_vfmv_Vw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_hf_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5064,7 +5064,7 @@ pub unsafe fn Q6_Vhf_equals_Vqf16(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_hf_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5076,7 +5076,7 @@ pub unsafe fn Q6_Vhf_equals_Wqf32(vuu: HvxVectorPair) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vconv_sf_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5088,7 +5088,7 @@ pub unsafe fn Q6_Vsf_equals_Vqf32(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 
-#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_b_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5100,7 +5100,7 @@ pub unsafe fn Q6_Vb_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_h_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5112,7 +5112,7 @@ pub unsafe fn Q6_Vh_vcvt_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_b))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5124,7 +5124,7 @@ pub unsafe fn Q6_Whf_vcvt_Vb(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5136,7 +5136,7 @@ pub unsafe fn Q6_Vhf_vcvt_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5148,7 +5148,7 @@ pub unsafe fn Q6_Vhf_vcvt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_ub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5160,7 
+5160,7 @@ pub unsafe fn Q6_Whf_vcvt_Vub(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_hf_uh))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5172,7 +5172,7 @@ pub unsafe fn Q6_Vhf_vcvt_Vuh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5184,7 +5184,7 @@ pub unsafe fn Q6_Wsf_vcvt_Vhf(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_ub_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5196,7 +5196,7 @@ pub unsafe fn Q6_Vub_vcvt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vcvt_uh_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5208,7 +5208,7 @@ pub unsafe fn Q6_Vuh_vcvt_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vdmpy_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5220,7 +5220,7 @@ pub unsafe fn Q6_Vsf_vdmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5232,7 +5232,7 @@ pub unsafe fn Q6_Vsf_vdmpyacc_VsfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVec /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmax_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5244,7 +5244,7 @@ pub unsafe fn Q6_Vhf_vfmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmax_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5256,7 +5256,7 @@ pub unsafe fn Q6_Vsf_vfmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmin_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5268,7 +5268,7 @@ pub unsafe fn Q6_Vhf_vfmin_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfmin_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5280,7 +5280,7 @@ pub unsafe fn Q6_Vsf_vfmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfneg_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ 
-5292,7 +5292,7 @@ pub unsafe fn Q6_Vhf_vfneg_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vfneg_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5304,7 +5304,7 @@ pub unsafe fn Q6_Vsf_vfneg_Vsf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmax_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5316,7 +5316,7 @@ pub unsafe fn Q6_Vhf_vmax_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmax_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5328,7 +5328,7 @@ pub unsafe fn Q6_Vsf_vmax_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmin_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5340,7 +5340,7 @@ pub unsafe fn Q6_Vhf_vmin_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmin_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5352,7 +5352,7 @@ pub unsafe fn Q6_Vsf_vmin_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] 
#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5364,7 +5364,7 @@ pub unsafe fn Q6_Vhf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_hf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5376,7 +5376,7 @@ pub unsafe fn Q6_Vhf_vmpyacc_VhfVhfVhf(vx: HvxVector, vu: HvxVector, vv: HvxVect /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5388,7 +5388,7 @@ pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5400,7 +5400,7 @@ pub unsafe fn Q6_Vqf16_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf16_mix_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5412,7 +5412,7 @@ pub unsafe fn Q6_Vqf16_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32))] #[unstable(feature = 
"stdarch_hexagon", issue = "151523")] @@ -5424,7 +5424,7 @@ pub unsafe fn Q6_Vqf32_vmpy_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5436,7 +5436,7 @@ pub unsafe fn Q6_Wqf32_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPai /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5448,7 +5448,7 @@ pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorP /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5460,7 +5460,7 @@ pub unsafe fn Q6_Wqf32_vmpy_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_qf32_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5472,7 +5472,7 @@ pub unsafe fn Q6_Vqf32_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5484,7 +5484,7 @@ pub unsafe fn Q6_Wsf_vmpy_VhfVhf(vu: HvxVector, vv: HvxVector) -> 
HvxVectorPair /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5500,7 +5500,7 @@ pub unsafe fn Q6_Wsf_vmpyacc_WsfVhfVhf( /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vmpy_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5512,7 +5512,7 @@ pub unsafe fn Q6_Vsf_vmpy_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5524,7 +5524,7 @@ pub unsafe fn Q6_Vqf16_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_hf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5536,7 +5536,7 @@ pub unsafe fn Q6_Vhf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf16))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5548,7 +5548,7 @@ pub unsafe fn Q6_Vqf16_vsub_Vqf16Vqf16(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] 
#[cfg_attr(test, assert_instr(vsub_qf16_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5560,7 +5560,7 @@ pub unsafe fn Q6_Vqf16_vsub_Vqf16Vhf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf32))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5572,7 +5572,7 @@ pub unsafe fn Q6_Vqf32_vsub_Vqf32Vqf32(vu: HvxVector, vv: HvxVector) -> HvxVecto /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_qf32_mix))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5584,7 +5584,7 @@ pub unsafe fn Q6_Vqf32_vsub_Vqf32Vsf(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5596,7 +5596,7 @@ pub unsafe fn Q6_Vqf32_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5608,7 +5608,7 @@ pub unsafe fn Q6_Wsf_vsub_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[cfg_attr(test, assert_instr(vsub_sf_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5620,7 +5620,7 @@ pub unsafe fn 
Q6_Vsf_vsub_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvuhubrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5632,7 +5632,7 @@ pub unsafe fn Q6_Vub_vasr_WuhVub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> H /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvuhubsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5644,7 +5644,7 @@ pub unsafe fn Q6_Vub_vasr_WuhVub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVe /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvwuhrndsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5656,7 +5656,7 @@ pub unsafe fn Q6_Vuh_vasr_WwVuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> Hv /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vasrvwuhsat))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5668,7 +5668,7 @@ pub unsafe fn Q6_Vuh_vasr_WwVuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVec /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] #[cfg_attr(test, assert_instr(vmpyuhvs))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5680,7 +5680,7 @@ pub unsafe fn Q6_Vuh_vmpy_VuhVuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 
-#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_h_hf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5692,7 +5692,7 @@ pub unsafe fn Q6_Vh_equals_Vhf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_hf_h))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5704,7 +5704,7 @@ pub unsafe fn Q6_Vhf_equals_Vh(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_sf_w))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5716,7 +5716,7 @@ pub unsafe fn Q6_Vsf_equals_Vw(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] #[cfg_attr(test, assert_instr(vconv_w_sf))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5728,7 +5728,7 @@ pub unsafe fn Q6_Vw_equals_Vsf(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(get_qfext))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5740,7 +5740,7 @@ pub unsafe fn Q6_V_vgetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(set_qfext))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5752,7 +5752,7 @@ pub unsafe fn 
Q6_V_vsetqfext_VR(vu: HvxVector, rt: i32) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vabs_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5764,7 +5764,7 @@ pub unsafe fn Q6_V_vabs_V(vu: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt2_hf_b))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5776,7 +5776,7 @@ pub unsafe fn Q6_Whf_vcvt2_Vb(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt2_hf_ub))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5788,7 +5788,7 @@ pub unsafe fn Q6_Whf_vcvt2_Vub(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_DV /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vcvt_hf_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5800,7 +5800,7 @@ pub unsafe fn Q6_Whf_vcvt_V(vu: HvxVector) -> HvxVectorPair { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfmax_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5812,7 +5812,7 @@ pub unsafe fn Q6_V_vfmax_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] 
#[cfg_attr(test, assert_instr(vfmin_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5824,7 +5824,7 @@ pub unsafe fn Q6_V_vfmin_VV(vu: HvxVector, vv: HvxVector) -> HvxVector { /// /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] #[cfg_attr(test, assert_instr(vfneg_f8))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] @@ -5837,7 +5837,7 @@ pub unsafe fn Q6_V_vfneg_V(vu: HvxVector) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_and_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5855,7 +5855,7 @@ pub unsafe fn Q6_Q_and_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_and_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5873,7 +5873,7 @@ pub unsafe fn Q6_Q_and_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPre /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_not_Q(qs: HvxVectorPred) -> HvxVectorPred { @@ -5891,7 +5891,7 @@ pub unsafe fn Q6_Q_not_Q(qs: HvxVectorPred) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_or_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5909,7 +5909,7 @@ pub unsafe fn Q6_Q_or_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_or_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5927,7 +5927,7 @@ pub unsafe fn Q6_Q_or_QQn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vsetq_R(rt: i32) -> HvxVectorPred { @@ -5939,7 +5939,7 @@ pub unsafe fn Q6_Q_vsetq_R(rt: i32) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_xor_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -5957,7 +5957,7 @@ pub unsafe fn Q6_Q_xor_QQ(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VM_ST /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vmem_QnRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { @@ -5973,7 +5973,7 @@ pub unsafe fn Q6_vmem_QnRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VM_ST /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vmem_QnRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { @@ -5989,7 +5989,7 @@ pub unsafe fn Q6_vmem_QnRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVec /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VM_ST /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vmem_QRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { @@ -6005,7 +6005,7 @@ pub unsafe fn Q6_vmem_QRIV_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVect /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VM_ST /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vmem_QRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { @@ -6021,7 +6021,7 @@ pub unsafe fn Q6_vmem_QRIV(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_condacc_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6037,7 +6037,7 @@ pub unsafe fn Q6_Vb_condacc_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_condacc_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6053,7 +6053,7 @@ pub unsafe fn Q6_Vb_condacc_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_condacc_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6069,7 +6069,7 @@ pub unsafe fn Q6_Vh_condacc_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_condacc_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6085,7 +6085,7 @@ pub unsafe fn Q6_Vh_condacc_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_condacc_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6101,7 +6101,7 @@ pub unsafe fn Q6_Vw_condacc_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_condacc_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6117,7 +6117,7 @@ pub unsafe fn Q6_Vw_condacc_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vand_QR(qu: HvxVectorPred, rt: i32) -> HvxVector { @@ -6129,7 +6129,7 @@ pub unsafe fn Q6_V_vand_QR(qu: HvxVectorPred, rt: i32) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vandor_VQR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { @@ -6141,7 +6141,7 @@ pub unsafe fn Q6_V_vandor_VQR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxV /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vand_VR(vu: HvxVector, rt: i32) -> HvxVectorPred { @@ -6153,7 +6153,7 @@ pub unsafe fn Q6_Q_vand_VR(vu: HvxVector, rt: i32) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vandor_QVR(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred { @@ -6169,7 +6169,7 @@ pub unsafe fn Q6_Q_vandor_QVR(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxV /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eq_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6181,7 +6181,7 @@ pub unsafe fn Q6_Q_vcmp_eq_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqand_QVbVb( @@ -6204,7 +6204,7 @@ pub unsafe fn Q6_Q_vcmp_eqand_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqor_QVbVb( @@ -6227,7 +6227,7 @@ pub unsafe fn Q6_Q_vcmp_eqor_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqxacc_QVbVb( @@ -6250,7 +6250,7 @@ pub unsafe fn Q6_Q_vcmp_eqxacc_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eq_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6262,7 +6262,7 @@ pub unsafe fn Q6_Q_vcmp_eq_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqand_QVhVh( @@ -6285,7 +6285,7 @@ pub unsafe fn Q6_Q_vcmp_eqand_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqor_QVhVh( @@ -6308,7 +6308,7 @@ pub unsafe fn Q6_Q_vcmp_eqor_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqxacc_QVhVh( @@ -6331,7 +6331,7 @@ pub unsafe fn Q6_Q_vcmp_eqxacc_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eq_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6343,7 +6343,7 @@ pub unsafe fn Q6_Q_vcmp_eq_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqand_QVwVw( @@ -6366,7 +6366,7 @@ pub unsafe fn Q6_Q_vcmp_eqand_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqor_QVwVw( @@ -6389,7 +6389,7 @@ pub unsafe fn Q6_Q_vcmp_eqor_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_eqxacc_QVwVw( @@ -6412,7 +6412,7 @@ pub unsafe fn Q6_Q_vcmp_eqxacc_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6424,7 +6424,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VbVb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVbVb( @@ -6447,7 +6447,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVbVb( @@ -6470,7 +6470,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVbVb( @@ -6493,7 +6493,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVbVb( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6505,7 +6505,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VhVh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVhVh( @@ -6528,7 +6528,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVhVh( @@ -6551,7 +6551,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVhVh( @@ -6574,7 +6574,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVhVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6586,7 +6586,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VubVub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVubVub( @@ -6609,7 +6609,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVubVub( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVubVub( @@ -6632,7 +6632,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVubVub( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVubVub( @@ -6655,7 +6655,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVubVub( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6667,7 +6667,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VuhVuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVuhVuh( @@ -6690,7 +6690,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVuhVuh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVuhVuh( @@ -6713,7 +6713,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVuhVuh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVuhVuh( @@ -6736,7 +6736,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVuhVuh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6748,7 +6748,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VuwVuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVuwVuw( @@ -6771,7 +6771,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVuwVuw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVuwVuw( @@ -6794,7 +6794,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVuwVuw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVuwVuw( @@ -6817,7 +6817,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVuwVuw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -6829,7 +6829,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VwVw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVwVw( @@ -6852,7 +6852,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVwVw( @@ -6875,7 +6875,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVwVw( @@ -6898,7 +6898,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVwVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vmux_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector { @@ -6914,7 +6914,7 @@ pub unsafe fn Q6_V_vmux_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_condnac_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6930,7 +6930,7 @@ pub unsafe fn Q6_Vb_condnac_QnVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_condnac_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6946,7 +6946,7 @@ pub unsafe fn Q6_Vb_condnac_QVbVb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_condnac_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6962,7 +6962,7 @@ pub unsafe fn Q6_Vh_condnac_QnVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_condnac_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6978,7 +6978,7 @@ pub unsafe fn Q6_Vh_condnac_QVhVh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_condnac_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -6994,7 +6994,7 @@ pub unsafe fn Q6_Vw_condnac_QnVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVect /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_condnac_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { @@ -7010,7 +7010,7 @@ pub unsafe fn Q6_Vw_condnac_QVwVw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVecto /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_W_vswap_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair { @@ -7026,7 +7026,7 @@ pub unsafe fn Q6_W_vswap_QVV(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VP /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vsetq2_R(rt: i32) -> HvxVectorPred { @@ -7038,7 +7038,7 @@ pub unsafe fn Q6_Q_vsetq2_R(rt: i32) -> HvxVectorPred { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Qb_vshuffe_QhQh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -7056,7 +7056,7 @@ pub unsafe fn Q6_Qb_vshuffe_QhQh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVec /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA_DV /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Qh_vshuffe_QwQw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { @@ -7074,7 +7074,7 @@ pub unsafe fn Q6_Qh_vshuffe_QwQw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVec /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vand_QnR(qu: HvxVectorPred, rt: i32) -> HvxVector { @@ -7089,7 +7089,7 @@ pub unsafe fn Q6_V_vand_QnR(qu: HvxVectorPred, rt: i32) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VX_LATE /// Execution Slots: SLOT23 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vandor_VQnR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { @@ -7105,7 +7105,7 @@ pub unsafe fn Q6_V_vandor_VQnR(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> Hvx /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vand_QnV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { @@ -7120,7 +7120,7 @@ pub unsafe fn Q6_V_vand_QnV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_V_vand_QV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { @@ -7135,7 +7135,7 @@ pub unsafe fn Q6_V_vand_QV(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_GATHER /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vgather_AQRMVh( @@ -7159,7 +7159,7 @@ pub unsafe fn Q6_vgather_AQRMVh( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_GATHER_DV /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vgather_AQRMWw( @@ -7183,7 +7183,7 @@ pub unsafe fn Q6_vgather_AQRMWw( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_GATHER /// Execution Slots: SLOT01 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vgather_AQRMVw( @@ -7207,7 +7207,7 @@ pub unsafe fn Q6_vgather_AQRMVw( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vb_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { @@ -7222,7 +7222,7 @@ pub unsafe fn Q6_Vb_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vh_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { @@ -7237,7 +7237,7 @@ pub unsafe fn Q6_Vh_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VS /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { @@ -7252,7 +7252,7 @@ pub unsafe fn Q6_Vw_prefixsum_Q(qv: HvxVectorPred) -> HvxVector { /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vscatter_QRMVhV( @@ -7276,7 +7276,7 @@ pub unsafe fn Q6_vscatter_QRMVhV( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_SCATTER_DV /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vscatter_QRMWwV( @@ -7300,7 +7300,7 @@ pub unsafe fn Q6_vscatter_QRMWwV( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_SCATTER /// Execution Slots: SLOT0 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_vscatter_QRMVwV( @@ -7324,7 +7324,7 @@ pub unsafe fn Q6_vscatter_QRMVwV( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Vw_vadd_VwVwQ_carry_sat( @@ -7344,7 +7344,7 @@ pub unsafe fn Q6_Vw_vadd_VwVwQ_carry_sat( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -7356,7 +7356,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VhfVhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVhfVhf( @@ -7379,7 +7379,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVhfVhf( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVhfVhf( @@ -7402,7 +7402,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVhfVhf( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVhfVhf( @@ -7425,7 +7425,7 @@ pub unsafe fn Q6_Q_vcmp_gtxacc_QVhfVhf( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { @@ -7437,7 +7437,7 @@ pub unsafe fn Q6_Q_vcmp_gt_VsfVsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtand_QVsfVsf( @@ -7460,7 +7460,7 @@ pub unsafe fn Q6_Q_vcmp_gtand_QVsfVsf( /// This is a compound operation composed of multiple HVX instructions. /// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtor_QVsfVsf( @@ -7483,7 +7483,7 @@ pub unsafe fn Q6_Q_vcmp_gtor_QVsfVsf( /// This is a compound operation composed of multiple HVX instructions. 
/// Instruction Type: CVI_VA /// Execution Slots: SLOT0123 -#[inline(always)] +#[inline] #[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] #[unstable(feature = "stdarch_hexagon", issue = "151523")] pub unsafe fn Q6_Q_vcmp_gtxacc_QVsfVsf( diff --git a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs index 79837e2224ee0..8a58c66313daf 100644 --- a/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-hexagon/src/main.rs @@ -1449,7 +1449,7 @@ fn generate_functions(intrinsics: &[IntrinsicInfo]) -> String { output.push_str(&format!("/// Execution Slots: {}\n", info.exec_slots)); // Generate attributes - output.push_str("#[inline(always)]\n"); + output.push_str("#[inline]\n"); output.push_str(&format!( "#[cfg_attr(target_arch = \"hexagon\", target_feature(enable = \"hvxv{}\"))]\n", info.min_arch @@ -1532,7 +1532,7 @@ fn generate_functions(intrinsics: &[IntrinsicInfo]) -> String { } // Generate attributes - output.push_str("#[inline(always)]\n"); + output.push_str("#[inline]\n"); output.push_str(&format!( "#[cfg_attr(target_arch = \"hexagon\", target_feature(enable = \"hvxv{}\"))]\n", info.min_arch From 13979fcd880b6245491806966ff3c2e1a9a595a7 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Wed, 6 May 2026 16:28:46 +0100 Subject: [PATCH 28/30] Remove #[inline(always)] from loongarch intrinsic generator & re-generate intrinsics --- .../src/loongarch64/lasx/generated.rs | 1144 ++++++++--------- .../src/loongarch64/lsx/generated.rs | 1056 +++++++-------- .../core_arch/src/loongarch_shared/mod.rs | 8 +- .../crates/stdarch-gen-loongarch/src/main.rs | 4 +- 4 files changed, 1106 insertions(+), 1106 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs index de629914ab2c0..c7dbd46480a42 100644 --- 
a/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lasx/generated.rs @@ -1157,35 +1157,35 @@ unsafe extern "unadjusted" { fn __lasx_insert_128_hi(a: __v4i64, b: __v2i64) -> __v4i64; } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrar_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrar_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrar_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrar_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrar_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrar_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrar_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrar_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1194,7 +1194,7 @@ pub fn lasx_xvsrari_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrari_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1203,7 +1203,7 @@ pub fn lasx_xvsrari_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrari_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1212,7 +1212,7 @@ pub 
fn lasx_xvsrari_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrari_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1221,35 +1221,35 @@ pub fn lasx_xvsrari_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrari_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlr_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlr_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlr_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlr_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlr_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlr_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlr_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlr_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1258,7 +1258,7 @@ pub fn lasx_xvsrlri_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlri_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1267,7 +1267,7 @@ pub fn lasx_xvsrlri_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlri_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = 
"lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1276,7 +1276,7 @@ pub fn lasx_xvsrlri_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlri_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1285,35 +1285,35 @@ pub fn lasx_xvsrlri_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlri_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitclr_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclr_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitclr_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclr_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitclr_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclr_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitclr_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclr_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1322,7 +1322,7 @@ pub fn lasx_xvbitclri_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclri_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1331,7 +1331,7 @@ pub fn 
lasx_xvbitclri_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclri_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1340,7 +1340,7 @@ pub fn lasx_xvbitclri_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclri_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1349,35 +1349,35 @@ pub fn lasx_xvbitclri_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitclri_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitset_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitset_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitset_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitset_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitset_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitset_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitset_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitset_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1386,7 +1386,7 @@ pub fn lasx_xvbitseti_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseti_b(transmute(a), IMM3)) } } -#[inline(always)] 
+#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1395,7 +1395,7 @@ pub fn lasx_xvbitseti_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseti_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1404,7 +1404,7 @@ pub fn lasx_xvbitseti_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseti_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1413,35 +1413,35 @@ pub fn lasx_xvbitseti_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseti_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitrev_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrev_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitrev_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrev_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitrev_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrev_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitrev_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrev_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue 
= "117427")] @@ -1450,7 +1450,7 @@ pub fn lasx_xvbitrevi_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrevi_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1459,7 +1459,7 @@ pub fn lasx_xvbitrevi_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrevi_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1468,7 +1468,7 @@ pub fn lasx_xvbitrevi_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrevi_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1477,7 +1477,7 @@ pub fn lasx_xvbitrevi_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbitrevi_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1486,7 +1486,7 @@ pub fn lasx_xvsubi_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsubi_bu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1495,7 +1495,7 @@ pub fn lasx_xvsubi_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsubi_hu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1504,7 +1504,7 @@ pub fn lasx_xvsubi_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsubi_wu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] @@ -1513,7 +1513,7 @@ pub fn lasx_xvsubi_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsubi_du(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1522,7 +1522,7 @@ pub fn lasx_xvsat_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1531,7 +1531,7 @@ pub fn lasx_xvsat_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1540,7 +1540,7 @@ pub fn lasx_xvsat_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1549,7 +1549,7 @@ pub fn lasx_xvsat_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1558,7 +1558,7 @@ pub fn lasx_xvsat_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_bu(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1567,7 +1567,7 @@ pub fn lasx_xvsat_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_hu(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] @@ -1576,7 +1576,7 @@ pub fn lasx_xvsat_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_wu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1585,399 +1585,399 @@ pub fn lasx_xvsat_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsat_du(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadda_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadda_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadda_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadda_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadda_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadda_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadda_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadda_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsadd_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsadd_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavg_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavg_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lasx_xvavgr_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvavgr_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvavgr_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lasx_xvssub_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssub_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssub_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_w(a: m256i, 
b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvabsd_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvabsd_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_d_w(a: m256i, b: 
m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_hu_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_hu_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_wu_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_wu_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_du_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_du_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_hu_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_hu_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub 
fn lasx_xvhsubw_wu_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_wu_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_du_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_du_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1986,7 +1986,7 @@ pub fn lasx_xvrepl128vei_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrepl128vei_b(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1995,7 +1995,7 @@ pub fn lasx_xvrepl128vei_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrepl128vei_h(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2004,7 +2004,7 @@ pub fn lasx_xvrepl128vei_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrepl128vei_w(transmute(a), IMM2)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2013,203 +2013,203 @@ pub fn lasx_xvrepl128vei_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrepl128vei_d(transmute(a), IMM1)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickev_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickev_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickev_h(a: m256i, b: m256i) -> m256i { 
unsafe { transmute(__lasx_xvpickev_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickev_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickev_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickev_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickev_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickod_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickod_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickod_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickod_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickod_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickod_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpickod_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpickod_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvh_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvh_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvh_h(a: m256i, b: m256i) -> m256i { 
unsafe { transmute(__lasx_xvilvh_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvh_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvh_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvh_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvh_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvl_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvl_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvl_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvl_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvl_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvl_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvilvl_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvilvl_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackev_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackev_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackev_h(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvpackev_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackev_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackev_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackev_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackev_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackod_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackod_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackod_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackod_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackod_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackod_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvpackod_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpackod_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvshuf_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvshuf_h(a: m256i, b: m256i, 
c: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvshuf_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvshuf_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2218,7 +2218,7 @@ pub fn lasx_xvandi_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvandi_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2227,7 +2227,7 @@ pub fn lasx_xvori_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvori_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2236,7 +2236,7 @@ pub fn lasx_xvnori_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvnori_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2245,14 +2245,14 @@ pub fn lasx_xvxori_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvxori_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvbitsel_v(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { 
transmute(__lasx_xvbitsel_v(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2261,7 +2261,7 @@ pub fn lasx_xvbitseli_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvbitseli_b(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2270,7 +2270,7 @@ pub fn lasx_xvshuf4i_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf4i_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2279,7 +2279,7 @@ pub fn lasx_xvshuf4i_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf4i_h(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2288,343 +2288,343 @@ pub fn lasx_xvshuf4i_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf4i_w(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclo_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclo_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclo_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclo_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclo_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclo_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvclo_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvclo_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvt_h_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcvt_h_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvt_s_d(a: m256d, b: m256d) -> m256 { unsafe { transmute(__lasx_xvfcvt_s_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmin_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfmin_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmin_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfmin_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmina_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfmina_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmina_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfmina_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmax_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfmax_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lasx_xvfmax_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfmax_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmaxa_s(a: m256, b: m256) -> m256 { unsafe { transmute(__lasx_xvfmaxa_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfmaxa_d(a: m256d, b: m256d) -> m256d { unsafe { transmute(__lasx_xvfmaxa_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfclass_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvfclass_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfclass_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvfclass_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrecip_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrecip_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrecip_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrecip_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrecipe_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrecipe_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrecipe_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrecipe_d(transmute(a))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrsqrte_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrsqrte_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrsqrte_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrsqrte_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrint_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrint_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrint_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrint_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrsqrt_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrsqrt_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrsqrt_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrsqrt_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvflogb_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvflogb_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvflogb_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvflogb_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvth_s_h(a: m256i) -> m256 { unsafe { 
transmute(__lasx_xvfcvth_s_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvth_d_s(a: m256) -> m256d { unsafe { transmute(__lasx_xvfcvth_d_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvtl_s_h(a: m256i) -> m256 { unsafe { transmute(__lasx_xvfcvtl_s_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcvtl_d_s(a: m256) -> m256d { unsafe { transmute(__lasx_xvfcvtl_d_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftint_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_l_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftint_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_wu_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftint_wu_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_lu_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftint_lu_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrz_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrz_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] 
pub fn lasx_xvftintrz_l_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrz_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrz_wu_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrz_wu_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrz_lu_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrz_lu_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_s_w(a: m256i) -> m256 { unsafe { transmute(__lasx_xvffint_s_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_d_l(a: m256i) -> m256d { unsafe { transmute(__lasx_xvffint_d_l(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_s_wu(a: m256i) -> m256 { unsafe { transmute(__lasx_xvffint_s_wu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_d_lu(a: m256i) -> m256d { unsafe { transmute(__lasx_xvffint_d_lu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve_b(a: m256i, b: i32) -> m256i { unsafe { transmute(__lasx_xvreplve_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve_h(a: m256i, b: i32) -> m256i { unsafe { transmute(__lasx_xvreplve_h(transmute(a), transmute(b))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve_w(a: m256i, b: i32) -> m256i { unsafe { transmute(__lasx_xvreplve_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve_d(a: m256i, b: i32) -> m256i { unsafe { transmute(__lasx_xvreplve_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2633,63 +2633,63 @@ pub fn lasx_xvpermi_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpermi_w(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_bu(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvmuh_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmuh_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmuh_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2698,7 +2698,7 @@ pub fn lasx_xvsllwil_h_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_h_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2707,7 +2707,7 @@ pub fn lasx_xvsllwil_w_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_w_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2716,7 +2716,7 @@ pub fn lasx_xvsllwil_d_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_d_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2725,7 +2725,7 @@ pub fn lasx_xvsllwil_hu_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_hu_bu(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] 
#[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2734,7 +2734,7 @@ pub fn lasx_xvsllwil_wu_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_wu_hu(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2743,217 +2743,217 @@ pub fn lasx_xvsllwil_du_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvsllwil_du_wu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsran_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsran_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsran_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsran_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsran_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsran_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_w_d(a: m256i, b: m256i) -> m256i 
{ unsafe { transmute(__lasx_xvssran_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_bu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_hu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssran_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssran_wu_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrarn_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarn_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrarn_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarn_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrarn_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarn_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_h_w(a: 
m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_bu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_hu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrarn_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarn_wu_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrln_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrln_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrln_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrln_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrln_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrln_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] 
pub fn lasx_xvssrln_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_bu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_hu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_wu_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlrn_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrn_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlrn_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrn_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsrlrn_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrn_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_bu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_hu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_wu_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2962,7 +2962,7 @@ pub fn lasx_xvfrstpi_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvfrstpi_b(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2971,21 +2971,21 @@ pub fn lasx_xvfrstpi_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvfrstpi_h(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrstp_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvfrstp_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrstp_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvfrstp_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2994,7 +2994,7 @@ pub fn lasx_xvshuf4i_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvshuf4i_d(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3003,7 +3003,7 @@ pub fn lasx_xvbsrl_v(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbsrl_v(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = 
"lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3012,7 +3012,7 @@ pub fn lasx_xvbsll_v(a: m256i) -> m256i { unsafe { transmute(__lasx_xvbsll_v(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3021,7 +3021,7 @@ pub fn lasx_xvextrins_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvextrins_b(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3030,7 +3030,7 @@ pub fn lasx_xvextrins_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvextrins_h(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3039,7 +3039,7 @@ pub fn lasx_xvextrins_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvextrins_w(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3048,287 +3048,287 @@ pub fn lasx_xvextrins_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvextrins_d(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskltz_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskltz_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskltz_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskltz_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = 
"lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskltz_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskltz_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskltz_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskltz_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsigncov_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsigncov_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsigncov_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsigncov_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsigncov_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsigncov_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsigncov_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsigncov_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrne_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrne_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrne_l_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrne_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] 
pub fn lasx_xvftintrp_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrp_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrp_l_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrp_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrm_w_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrm_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrm_l_d(a: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrm_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftint_w_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvftint_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffint_s_l(a: m256i, b: m256i) -> m256 { unsafe { transmute(__lasx_xvffint_s_l(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrz_w_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrz_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrp_w_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrp_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrm_w_d(a: m256d, b: m256d) -> m256i { 
unsafe { transmute(__lasx_xvftintrm_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrne_w_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvftintrne_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftinth_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftinth_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintl_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintl_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffinth_d_w(a: m256i) -> m256d { unsafe { transmute(__lasx_xvffinth_d_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvffintl_d_w(a: m256i) -> m256d { unsafe { transmute(__lasx_xvffintl_d_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrzh_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrzh_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrzl_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrzl_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrph_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrph_l_s(transmute(a))) } } -#[inline(always)] +#[inline] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrpl_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrpl_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrmh_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrmh_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrml_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrml_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrneh_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrneh_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvftintrnel_l_s(a: m256) -> m256i { unsafe { transmute(__lasx_xvftintrnel_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrne_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrintrne_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrne_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrintrne_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrz_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrintrz_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrz_d(a: m256d) -> m256d { 
unsafe { transmute(__lasx_xvfrintrz_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrp_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrintrp_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrp_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrintrp_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrm_s(a: m256) -> m256 { unsafe { transmute(__lasx_xvfrintrm_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfrintrm_d(a: m256d) -> m256d { unsafe { transmute(__lasx_xvfrintrm_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3337,7 +3337,7 @@ pub unsafe fn lasx_xvld(mem_addr: *const i8) -> m256i { transmute(__lasx_xvld(mem_addr, IMM_S12)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3346,7 +3346,7 @@ pub unsafe fn lasx_xvst(a: m256i, mem_addr: *mut i8) { __lasx_xvst(transmute(a), mem_addr, IMM_S12) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3356,7 +3356,7 @@ pub unsafe fn lasx_xvstelm_b(a: m256i, mem_a __lasx_xvstelm_b(transmute(a), mem_addr, IMM_S8, IMM4) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3366,7 +3366,7 
@@ pub unsafe fn lasx_xvstelm_h(a: m256i, mem_a __lasx_xvstelm_h(transmute(a), mem_addr, IMM_S8, IMM3) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3376,7 +3376,7 @@ pub unsafe fn lasx_xvstelm_w(a: m256i, mem_a __lasx_xvstelm_w(transmute(a), mem_addr, IMM_S8, IMM2) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3386,7 +3386,7 @@ pub unsafe fn lasx_xvstelm_d(a: m256i, mem_a __lasx_xvstelm_d(transmute(a), mem_addr, IMM_S8, IMM1) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3395,7 +3395,7 @@ pub fn lasx_xvinsve0_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvinsve0_w(transmute(a), transmute(b), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3404,7 +3404,7 @@ pub fn lasx_xvinsve0_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvinsve0_d(transmute(a), transmute(b), IMM2)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3413,7 +3413,7 @@ pub fn lasx_xvpickve_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpickve_w(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3422,49 +3422,49 @@ pub fn lasx_xvpickve_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpickve_d(transmute(a), IMM2)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lasx_xvssrlrn_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrlrn_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrn_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvssrln_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrln_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3473,147 +3473,147 @@ pub fn lasx_xvldi() -> m256i { unsafe { transmute(__lasx_xvldi(IMM_S13)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lasx_xvldx(mem_addr: *const i8, b: i64) -> m256i { transmute(__lasx_xvldx(mem_addr, transmute(b))) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub unsafe fn lasx_xvstx(a: m256i, mem_addr: *mut i8, b: i64) { __lasx_xvstx(transmute(a), mem_addr, transmute(b)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvextl_qu_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvextl_qu_du(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvreplve0_q(a: m256i) -> m256i { unsafe { transmute(__lasx_xvreplve0_q(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_h_b(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_h_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_w_h(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_w_h(transmute(a))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_d_w(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_d_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_w_b(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_w_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_d_h(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_d_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_d_b(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_d_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_hu_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_hu_bu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_wu_hu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_wu_hu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_du_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_du_wu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_wu_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_wu_bu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_du_hu(a: 
m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_du_hu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_vext2xv_du_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_vext2xv_du_bu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3622,7 +3622,7 @@ pub fn lasx_xvpermi_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvpermi_q(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3631,14 +3631,14 @@ pub fn lasx_xvpermi_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvpermi_d(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvperm_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvperm_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3647,7 +3647,7 @@ pub unsafe fn lasx_xvldrepl_b(mem_addr: *const i8) -> m256i transmute(__lasx_xvldrepl_b(mem_addr, IMM_S12)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3656,7 +3656,7 @@ pub unsafe fn lasx_xvldrepl_h(mem_addr: *const i8) -> m256i transmute(__lasx_xvldrepl_h(mem_addr, IMM_S11)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3665,7 +3665,7 @@ pub unsafe fn lasx_xvldrepl_w(mem_addr: *const i8) -> m256i 
transmute(__lasx_xvldrepl_w(mem_addr, IMM_S10)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3674,763 +3674,763 @@ pub unsafe fn lasx_xvldrepl_d(mem_addr: *const i8) -> m256i { transmute(__lasx_xvldrepl_d(mem_addr, IMM_S9)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lasx_xvaddwev_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwev_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwev_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] 
+#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_d_wu(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvaddwod_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lasx_xvsubwod_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsubwod_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsubwod_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_d_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_w_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_h_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_q_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_d_wu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_w_hu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_h_bu(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_d_wu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_d_wu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_w_hu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_w_hu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_h_bu_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_h_bu_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_d_wu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_d_wu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_w_hu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_w_hu_h(transmute(a), transmute(b))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_h_bu_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_h_bu_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_d_wu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_d_wu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_w_hu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_w_hu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_h_bu_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_h_bu_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_d_wu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_d_wu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_w_hu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_w_hu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_h_bu_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_h_bu_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_q_d(a: m256i, b: m256i) -> m256i 
{ unsafe { transmute(__lasx_xvhaddw_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhaddw_qu_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhaddw_qu_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_q_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvhsubw_qu_du(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvhsubw_qu_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_q_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_q_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_d_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_d_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_w_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_w_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_h_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_h_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_q_du(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_q_du(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_d_wu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_d_wu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_w_hu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_w_hu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_h_bu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_h_bu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_q_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_q_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_d_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_d_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_w_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_w_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = 
"lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_h_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_h_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_q_du(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_q_du(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_d_wu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_d_wu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_w_hu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_w_hu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_h_bu(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_h_bu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_q_du_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_q_du_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_d_wu_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_d_wu_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_w_hu_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_w_hu_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwev_h_bu_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwev_h_bu_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_q_du_d(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_q_du_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_d_wu_w(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_d_wu_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_w_hu_h(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_w_hu_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmaddwod_h_bu_b(a: m256i, b: m256i, c: m256i) -> m256i { unsafe { transmute(__lasx_xvmaddwod_h_bu_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvrotr_b(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvrotr_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvrotr_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvrotr_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvrotr_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvrotr_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvrotr_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvrotr_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvadd_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvadd_q(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvsub_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsub_q(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwev_q_du_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwev_q_du_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvaddwod_q_du_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvaddwod_q_du_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwev_q_du_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwev_q_du_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmulwod_q_du_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvmulwod_q_du_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmskgez_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmskgez_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvmsknz_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvmsknz_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_h_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_h_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_w_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_w_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_d_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_d_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_q_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_q_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_hu_bu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_hu_bu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_wu_hu(a: m256i) -> m256i { unsafe { 
transmute(__lasx_xvexth_wu_hu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_du_wu(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_du_wu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvexth_qu_du(a: m256i) -> m256i { unsafe { transmute(__lasx_xvexth_qu_du(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4439,7 +4439,7 @@ pub fn lasx_xvrotri_b(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrotri_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4448,7 +4448,7 @@ pub fn lasx_xvrotri_h(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrotri_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4457,7 +4457,7 @@ pub fn lasx_xvrotri_w(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrotri_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4466,14 +4466,14 @@ pub fn lasx_xvrotri_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvrotri_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvextl_q_d(a: m256i) -> m256i { unsafe { transmute(__lasx_xvextl_q_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature 
= "stdarch_loongarch", issue = "117427")] @@ -4482,7 +4482,7 @@ pub fn lasx_xvsrlni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4491,7 +4491,7 @@ pub fn lasx_xvsrlni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4500,7 +4500,7 @@ pub fn lasx_xvsrlni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4509,7 +4509,7 @@ pub fn lasx_xvsrlni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4518,7 +4518,7 @@ pub fn lasx_xvsrlrni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4527,7 +4527,7 @@ pub fn lasx_xvsrlrni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4536,7 +4536,7 @@ pub fn lasx_xvsrlrni_w_d(a: m256i, b: m256i) -> 
m256i { unsafe { transmute(__lasx_xvsrlrni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4545,7 +4545,7 @@ pub fn lasx_xvsrlrni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrlrni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4554,7 +4554,7 @@ pub fn lasx_xvssrlni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4563,7 +4563,7 @@ pub fn lasx_xvssrlni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4572,7 +4572,7 @@ pub fn lasx_xvssrlni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4581,7 +4581,7 @@ pub fn lasx_xvssrlni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4590,7 +4590,7 @@ pub fn lasx_xvssrlni_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_bu_h(transmute(a), transmute(b), IMM4)) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4599,7 +4599,7 @@ pub fn lasx_xvssrlni_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4608,7 +4608,7 @@ pub fn lasx_xvssrlni_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4617,7 +4617,7 @@ pub fn lasx_xvssrlni_du_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4626,7 +4626,7 @@ pub fn lasx_xvssrlrni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4635,7 +4635,7 @@ pub fn lasx_xvssrlrni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4644,7 +4644,7 @@ pub fn lasx_xvssrlrni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] 
#[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4653,7 +4653,7 @@ pub fn lasx_xvssrlrni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4662,7 +4662,7 @@ pub fn lasx_xvssrlrni_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4671,7 +4671,7 @@ pub fn lasx_xvssrlrni_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4680,7 +4680,7 @@ pub fn lasx_xvssrlrni_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4689,7 +4689,7 @@ pub fn lasx_xvssrlrni_du_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrlrni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4698,7 +4698,7 @@ pub fn lasx_xvsrani_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrani_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] @@ -4707,7 +4707,7 @@ pub fn lasx_xvsrani_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrani_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4716,7 +4716,7 @@ pub fn lasx_xvsrani_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrani_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4725,7 +4725,7 @@ pub fn lasx_xvsrani_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrani_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4734,7 +4734,7 @@ pub fn lasx_xvsrarni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4743,7 +4743,7 @@ pub fn lasx_xvsrarni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4752,7 +4752,7 @@ pub fn lasx_xvsrarni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvsrarni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4761,7 +4761,7 @@ pub fn lasx_xvsrarni_d_q(a: m256i, b: m256i) -> m256i { unsafe { 
transmute(__lasx_xvsrarni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4770,7 +4770,7 @@ pub fn lasx_xvssrani_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4779,7 +4779,7 @@ pub fn lasx_xvssrani_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4788,7 +4788,7 @@ pub fn lasx_xvssrani_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4797,7 +4797,7 @@ pub fn lasx_xvssrani_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4806,7 +4806,7 @@ pub fn lasx_xvssrani_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4815,7 +4815,7 @@ pub fn lasx_xvssrani_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] 
#[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4824,7 +4824,7 @@ pub fn lasx_xvssrani_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4833,7 +4833,7 @@ pub fn lasx_xvssrani_du_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrani_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4842,7 +4842,7 @@ pub fn lasx_xvssrarni_b_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4851,7 +4851,7 @@ pub fn lasx_xvssrarni_h_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4860,7 +4860,7 @@ pub fn lasx_xvssrarni_w_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4869,7 +4869,7 @@ pub fn lasx_xvssrarni_d_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] @@ -4878,7 +4878,7 @@ pub fn lasx_xvssrarni_bu_h(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4887,7 +4887,7 @@ pub fn lasx_xvssrarni_hu_w(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4896,7 +4896,7 @@ pub fn lasx_xvssrarni_wu_d(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4905,385 +4905,385 @@ pub fn lasx_xvssrarni_du_q(a: m256i, b: m256i) -> m256i { unsafe { transmute(__lasx_xvssrarni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_b(a: m256i) -> i32 { unsafe { transmute(__lasx_xbnz_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_d(a: m256i) -> i32 { unsafe { transmute(__lasx_xbnz_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_h(a: m256i) -> i32 { unsafe { transmute(__lasx_xbnz_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_v(a: m256i) -> i32 { unsafe { 
transmute(__lasx_xbnz_v(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbnz_w(a: m256i) -> i32 { unsafe { transmute(__lasx_xbnz_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_b(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_d(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_h(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_v(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_v(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xbz_w(a: m256i) -> i32 { unsafe { transmute(__lasx_xbz_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_caf_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_caf_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_caf_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_caf_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_ceq_d(a: m256d, b: 
m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_ceq_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_ceq_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_ceq_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cle_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cle_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cle_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cle_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_clt_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_clt_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_clt_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_clt_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cne_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cne_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cne_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cne_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lasx_xvfcmp_cor_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cor_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cor_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cor_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cueq_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cueq_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cueq_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cueq_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cule_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cule_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cule_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cule_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cult_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cult_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cult_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cult_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cun_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cun_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cune_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_cune_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cune_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cune_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_cun_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_cun_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_saf_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_saf_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_saf_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_saf_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_seq_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_seq_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_seq_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_seq_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = 
"lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sle_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sle_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sle_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sle_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_slt_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_slt_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_slt_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_slt_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sne_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sne_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sne_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sne_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sor_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sor_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sor_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sor_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] 
#[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sueq_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sueq_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sueq_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sueq_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sule_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sule_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sule_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sule_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sult_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sult_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sult_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sult_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sun_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sun_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sune_d(a: m256d, b: m256d) -> m256i { unsafe { transmute(__lasx_xvfcmp_sune_d(transmute(a), 
transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sune_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sune_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_xvfcmp_sun_s(a: m256, b: m256) -> m256i { unsafe { transmute(__lasx_xvfcmp_sun_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5292,7 +5292,7 @@ pub fn lasx_xvpickve_d_f(a: m256d) -> m256d { unsafe { transmute(__lasx_xvpickve_d_f(transmute(a), IMM2)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -5301,126 +5301,126 @@ pub fn lasx_xvpickve_w_f(a: m256) -> m256 { unsafe { transmute(__lasx_xvpickve_w_f(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_cast_128_s(a: m128) -> m256 { unsafe { transmute(__lasx_cast_128_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_cast_128_d(a: m128d) -> m256d { unsafe { transmute(__lasx_cast_128_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_cast_128(a: m128i) -> m256i { unsafe { transmute(__lasx_cast_128(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_concat_128_s(a: m128, b: m128) -> m256 { unsafe { 
transmute(__lasx_concat_128_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_concat_128_d(a: m128d, b: m128d) -> m256d { unsafe { transmute(__lasx_concat_128_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_concat_128(a: m128i, b: m128i) -> m256i { unsafe { transmute(__lasx_concat_128(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_lo_s(a: m256) -> m128 { unsafe { transmute(__lasx_extract_128_lo_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_hi_s(a: m256) -> m128 { unsafe { transmute(__lasx_extract_128_hi_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_lo_d(a: m256d) -> m128d { unsafe { transmute(__lasx_extract_128_lo_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_hi_d(a: m256d) -> m128d { unsafe { transmute(__lasx_extract_128_hi_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_lo(a: m256i) -> m128i { unsafe { transmute(__lasx_extract_128_lo(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_extract_128_hi(a: m256i) -> m128i { unsafe { transmute(__lasx_extract_128_hi(transmute(a))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_lo_s(a: m256, b: m128) -> m256 { unsafe { transmute(__lasx_insert_128_lo_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_hi_s(a: m256, b: m128) -> m256 { unsafe { transmute(__lasx_insert_128_hi_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_lo_d(a: m256d, b: m128d) -> m256d { unsafe { transmute(__lasx_insert_128_lo_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_hi_d(a: m256d, b: m128d) -> m256d { unsafe { transmute(__lasx_insert_128_hi_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_lo(a: m256i, b: m128i) -> m256i { unsafe { transmute(__lasx_insert_128_lo(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lasx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lasx_insert_128_hi(a: m256i, b: m128i) -> m256i { diff --git a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs index d2d77e2f3e932..4ec3cdf0c5abb 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/lsx/generated.rs @@ -1069,35 +1069,35 @@ unsafe extern "unadjusted" { fn __lsx_vfcmp_sun_s(a: __v4f32, b: __v4f32) -> __v4i32; } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrar_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrar_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrar_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrar_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrar_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrar_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrar_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrar_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1106,7 +1106,7 @@ pub fn lsx_vsrari_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrari_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1115,7 +1115,7 @@ pub fn lsx_vsrari_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrari_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1124,7 +1124,7 @@ pub fn lsx_vsrari_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrari_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1133,35 +1133,35 @@ pub fn lsx_vsrari_d(a: m128i) -> m128i { unsafe { 
transmute(__lsx_vsrari_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlr_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlr_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlr_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlr_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlr_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlr_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlr_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlr_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1170,7 +1170,7 @@ pub fn lsx_vsrlri_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrlri_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1179,7 +1179,7 @@ pub fn lsx_vsrlri_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrlri_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1188,7 +1188,7 @@ pub fn lsx_vsrlri_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrlri_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] @@ -1197,35 +1197,35 @@ pub fn lsx_vsrlri_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vsrlri_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitclr_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitclr_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitclr_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitclr_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitclr_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitclr_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitclr_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitclr_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1234,7 +1234,7 @@ pub fn lsx_vbitclri_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitclri_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1243,7 +1243,7 @@ pub fn lsx_vbitclri_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitclri_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1252,7 +1252,7 @@ pub fn lsx_vbitclri_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitclri_w(transmute(a), IMM5)) } } -#[inline(always)] 
+#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1261,35 +1261,35 @@ pub fn lsx_vbitclri_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitclri_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitset_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitset_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitset_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitset_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitset_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitset_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitset_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitset_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1298,7 +1298,7 @@ pub fn lsx_vbitseti_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitseti_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1307,7 +1307,7 @@ pub fn lsx_vbitseti_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitseti_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1316,7 +1316,7 @@ 
pub fn lsx_vbitseti_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitseti_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1325,35 +1325,35 @@ pub fn lsx_vbitseti_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitseti_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitrev_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitrev_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitrev_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitrev_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitrev_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitrev_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitrev_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vbitrev_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1362,7 +1362,7 @@ pub fn lsx_vbitrevi_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitrevi_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1371,7 +1371,7 @@ pub fn lsx_vbitrevi_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitrevi_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = 
"lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1380,7 +1380,7 @@ pub fn lsx_vbitrevi_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitrevi_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1389,7 +1389,7 @@ pub fn lsx_vbitrevi_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vbitrevi_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1398,7 +1398,7 @@ pub fn lsx_vsubi_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsubi_bu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1407,7 +1407,7 @@ pub fn lsx_vsubi_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsubi_hu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1416,7 +1416,7 @@ pub fn lsx_vsubi_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsubi_wu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1425,7 +1425,7 @@ pub fn lsx_vsubi_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vsubi_du(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1434,7 +1434,7 @@ pub fn lsx_vsat_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1443,7 +1443,7 @@ pub fn lsx_vsat_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1452,7 +1452,7 @@ pub fn lsx_vsat_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1461,7 +1461,7 @@ pub fn lsx_vsat_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1470,7 +1470,7 @@ pub fn lsx_vsat_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_bu(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1479,7 +1479,7 @@ pub fn lsx_vsat_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_hu(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1488,7 +1488,7 @@ pub fn lsx_vsat_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_wu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1497,427 +1497,427 @@ pub fn lsx_vsat_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vsat_du(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue 
= "117427")] pub fn lsx_vadda_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadda_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadda_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadda_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadda_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadda_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadda_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadda_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_bu(a: m128i, b: m128i) -> m128i { unsafe 
{ transmute(__lsx_vsadd_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsadd_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsadd_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_bu(transmute(a), transmute(b))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavg_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavg_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vavgr_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vavgr_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] 
pub fn lsx_vssub_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssub_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssub_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_hu(a: m128i, b: m128i) -> m128i { unsafe { 
transmute(__lsx_vabsd_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vabsd_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vabsd_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_hu_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_hu_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_wu_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_wu_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_du_wu(a: m128i, b: m128i) -> m128i { unsafe { 
transmute(__lsx_vhaddw_du_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_hu_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_hu_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_wu_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_wu_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_du_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_du_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplve_b(a: m128i, b: i32) -> m128i { unsafe { transmute(__lsx_vreplve_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplve_h(a: m128i, b: i32) -> m128i { unsafe { 
transmute(__lsx_vreplve_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplve_w(a: m128i, b: i32) -> m128i { unsafe { transmute(__lsx_vreplve_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vreplve_d(a: m128i, b: i32) -> m128i { unsafe { transmute(__lsx_vreplve_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1926,7 +1926,7 @@ pub fn lsx_vreplvei_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vreplvei_b(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1935,7 +1935,7 @@ pub fn lsx_vreplvei_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vreplvei_h(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1944,7 +1944,7 @@ pub fn lsx_vreplvei_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vreplvei_w(transmute(a), IMM2)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -1953,196 +1953,196 @@ pub fn lsx_vreplvei_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vreplvei_d(transmute(a), IMM1)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickev_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickev_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickev_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickev_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickev_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickev_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickev_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickev_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickod_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickod_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickod_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickod_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickod_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickod_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpickod_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpickod_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvh_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvh_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] pub fn lsx_vilvh_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvh_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvh_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvh_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvh_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvh_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvl_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvl_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvl_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvl_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvl_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvl_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vilvl_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vilvl_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackev_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackev_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackev_h(a: m128i, b: m128i) -> 
m128i { unsafe { transmute(__lsx_vpackev_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackev_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackev_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackev_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackev_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackod_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackod_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackod_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackod_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackod_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackod_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vpackod_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpackod_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vshuf_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vshuf_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vshuf_w(a: m128i, b: m128i, c: m128i) -> m128i { 
unsafe { transmute(__lsx_vshuf_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vshuf_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vshuf_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2151,7 +2151,7 @@ pub fn lsx_vandi_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vandi_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2160,7 +2160,7 @@ pub fn lsx_vori_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vori_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2169,7 +2169,7 @@ pub fn lsx_vnori_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vnori_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2178,14 +2178,14 @@ pub fn lsx_vxori_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vxori_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vbitsel_v(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vbitsel_v(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2194,7 +2194,7 @@ pub fn lsx_vbitseli_b(a: m128i, b: m128i) -> m128i { unsafe { 
transmute(__lsx_vbitseli_b(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2203,7 +2203,7 @@ pub fn lsx_vshuf4i_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vshuf4i_b(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2212,7 +2212,7 @@ pub fn lsx_vshuf4i_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vshuf4i_h(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2221,371 +2221,371 @@ pub fn lsx_vshuf4i_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vshuf4i_w(transmute(a), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclo_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vclo_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclo_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vclo_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclo_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vclo_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vclo_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vclo_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvt_h_s(a: m128, b: m128) -> m128i { unsafe { 
transmute(__lsx_vfcvt_h_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvt_s_d(a: m128d, b: m128d) -> m128 { unsafe { transmute(__lsx_vfcvt_s_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmin_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfmin_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmin_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfmin_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmina_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfmina_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmina_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfmina_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmax_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfmax_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmax_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfmax_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmaxa_s(a: m128, b: m128) -> m128 { unsafe { transmute(__lsx_vfmaxa_s(transmute(a), transmute(b))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfmaxa_d(a: m128d, b: m128d) -> m128d { unsafe { transmute(__lsx_vfmaxa_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfclass_s(a: m128) -> m128i { unsafe { transmute(__lsx_vfclass_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfclass_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vfclass_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrecip_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrecip_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrecip_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrecip_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrecipe_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrecipe_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrecipe_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrecipe_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrsqrte_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrsqrte_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx,frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrsqrte_d(a: m128d) -> m128d { unsafe { 
transmute(__lsx_vfrsqrte_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrint_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrint_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrint_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrint_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrsqrt_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrsqrt_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrsqrt_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrsqrt_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vflogb_s(a: m128) -> m128 { unsafe { transmute(__lsx_vflogb_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vflogb_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vflogb_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvth_s_h(a: m128i) -> m128 { unsafe { transmute(__lsx_vfcvth_s_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvth_d_s(a: m128) -> m128d { unsafe { transmute(__lsx_vfcvth_d_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvtl_s_h(a: m128i) -> m128 { unsafe { 
transmute(__lsx_vfcvtl_s_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcvtl_d_s(a: m128) -> m128d { unsafe { transmute(__lsx_vfcvtl_d_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftint_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftint_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_wu_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftint_wu_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_lu_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftint_lu_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrz_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrz_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrz_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftintrz_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrz_wu_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrz_wu_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrz_lu_d(a: 
m128d) -> m128i { unsafe { transmute(__lsx_vftintrz_lu_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_s_w(a: m128i) -> m128 { unsafe { transmute(__lsx_vffint_s_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_d_l(a: m128i) -> m128d { unsafe { transmute(__lsx_vffint_d_l(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_s_wu(a: m128i) -> m128 { unsafe { transmute(__lsx_vffint_s_wu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_d_lu(a: m128i) -> m128d { unsafe { transmute(__lsx_vffint_d_lu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmuh_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmuh_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2594,7 +2594,7 @@ pub fn lsx_vsllwil_h_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_h_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2603,7 +2603,7 @@ pub fn lsx_vsllwil_w_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_w_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2612,7 +2612,7 @@ pub fn lsx_vsllwil_d_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_d_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2621,7 +2621,7 @@ pub fn lsx_vsllwil_hu_bu(a: m128i) -> m128i { unsafe { 
transmute(__lsx_vsllwil_hu_bu(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2630,7 +2630,7 @@ pub fn lsx_vsllwil_wu_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_wu_hu(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2639,217 +2639,217 @@ pub fn lsx_vsllwil_du_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vsllwil_du_wu(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsran_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsran_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsran_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsran_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsran_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsran_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub 
fn lsx_vssran_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_bu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_hu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssran_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssran_wu_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrarn_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarn_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrarn_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarn_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrarn_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarn_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vssrarn_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_bu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_hu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrarn_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarn_wu_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrln_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrln_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrln_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrln_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrln_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrln_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vssrln_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_bu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_hu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_wu_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlrn_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrn_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlrn_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrn_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsrlrn_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrn_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_bu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_hu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vssrlrn_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_wu_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2858,7 +2858,7 @@ pub fn lsx_vfrstpi_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vfrstpi_b(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2867,21 +2867,21 @@ pub fn lsx_vfrstpi_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vfrstpi_h(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrstp_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vfrstp_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrstp_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vfrstp_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2890,7 +2890,7 @@ pub fn lsx_vshuf4i_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vshuf4i_d(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2899,7 +2899,7 @@ pub fn lsx_vbsrl_v(a: m128i) -> m128i { unsafe { transmute(__lsx_vbsrl_v(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] @@ -2908,7 +2908,7 @@ pub fn lsx_vbsll_v(a: m128i) -> m128i { unsafe { transmute(__lsx_vbsll_v(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2917,7 +2917,7 @@ pub fn lsx_vextrins_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vextrins_b(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2926,7 +2926,7 @@ pub fn lsx_vextrins_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vextrins_h(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2935,7 +2935,7 @@ pub fn lsx_vextrins_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vextrins_w(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -2944,287 +2944,287 @@ pub fn lsx_vextrins_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vextrins_d(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskltz_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vmskltz_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskltz_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vmskltz_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskltz_w(a: m128i) -> m128i { unsafe { 
transmute(__lsx_vmskltz_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskltz_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vmskltz_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsigncov_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsigncov_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsigncov_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsigncov_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsigncov_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsigncov_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsigncov_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsigncov_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrne_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrne_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrne_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftintrne_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrp_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrp_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = 
"lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrp_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftintrp_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrm_w_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrm_w_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrm_l_d(a: m128d) -> m128i { unsafe { transmute(__lsx_vftintrm_l_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftint_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftint_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffint_s_l(a: m128i, b: m128i) -> m128 { unsafe { transmute(__lsx_vffint_s_l(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrz_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftintrz_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrp_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftintrp_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrm_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftintrm_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub 
fn lsx_vftintrne_w_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vftintrne_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintl_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintl_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftinth_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftinth_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffinth_d_w(a: m128i) -> m128d { unsafe { transmute(__lsx_vffinth_d_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vffintl_d_w(a: m128i) -> m128d { unsafe { transmute(__lsx_vffintl_d_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrzl_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrzl_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrzh_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrzh_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrpl_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrpl_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrph_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrph_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrml_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrml_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrmh_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrmh_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrnel_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrnel_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vftintrneh_l_s(a: m128) -> m128i { unsafe { transmute(__lsx_vftintrneh_l_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrne_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrintrne_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrne_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrintrne_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrz_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrintrz_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrz_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrintrz_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrp_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrintrp_s(transmute(a))) } } -#[inline(always)] +#[inline] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrp_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrintrp_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrm_s(a: m128) -> m128 { unsafe { transmute(__lsx_vfrintrm_s(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfrintrm_d(a: m128d) -> m128d { unsafe { transmute(__lsx_vfrintrm_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3234,7 +3234,7 @@ pub unsafe fn lsx_vstelm_b(a: m128i, mem_add __lsx_vstelm_b(transmute(a), mem_addr, IMM_S8, IMM4) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3244,7 +3244,7 @@ pub unsafe fn lsx_vstelm_h(a: m128i, mem_add __lsx_vstelm_h(transmute(a), mem_addr, IMM_S8, IMM3) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3254,7 +3254,7 @@ pub unsafe fn lsx_vstelm_w(a: m128i, mem_add __lsx_vstelm_w(transmute(a), mem_addr, IMM_S8, IMM2) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2, 3)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3264,693 +3264,693 @@ pub unsafe fn lsx_vstelm_d(a: m128i, mem_add __lsx_vstelm_d(transmute(a), mem_addr, IMM_S8, IMM1) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_d_w(a: m128i, b: m128i) -> m128i { unsafe { 
transmute(__lsx_vaddwev_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_h_bu(a: m128i, b: m128i) -> m128i { unsafe { 
transmute(__lsx_vaddwev_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_d_wu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_d_wu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_w_hu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_w_hu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_h_bu_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_h_bu_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_d_wu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_d_wu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_w_hu_h(a: m128i, b: 
m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_w_hu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_h_bu_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_h_bu_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_d_wu(a: m128i, 
b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_q_du(a: 
m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwev_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwev_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsubwod_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsubwod_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwev_q_du_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwev_q_du_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vaddwod_q_du_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vaddwod_q_du_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vmulwev_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_d_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_d_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_w_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_w_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_h_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_h_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
lsx_vmulwev_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_d_wu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_d_wu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_w_hu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_w_hu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_h_bu(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_h_bu(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_d_wu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_d_wu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_w_hu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_w_hu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_h_bu_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_h_bu_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_d_wu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_d_wu_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", 
issue = "117427")] pub fn lsx_vmulwod_w_hu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_w_hu_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_h_bu_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_h_bu_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_q_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_q_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwev_q_du_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwev_q_du_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmulwod_q_du_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vmulwod_q_du_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhaddw_qu_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhaddw_qu_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_q_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_q_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vhsubw_qu_du(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vhsubw_qu_du(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_d_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_d_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_w_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_w_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_h_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_h_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_d_wu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_d_wu(transmute(a), transmute(b), 
transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_w_hu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_w_hu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_h_bu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_h_bu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_d_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_d_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_w_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_w_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_h_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_h_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_d_wu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_d_wu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_w_hu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_w_hu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] 
#[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_h_bu(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_h_bu(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_d_wu_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_d_wu_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_w_hu_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_w_hu_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_h_bu_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_h_bu_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_d_wu_w(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_d_wu_w(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_w_hu_h(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_w_hu_h(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_h_bu_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_h_bu_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] 
#[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_q_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_q_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_q_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_q_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_q_du(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_q_du(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_q_du(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_q_du(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwev_q_du_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwev_q_du_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmaddwod_q_du_d(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vmaddwod_q_du_d(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vrotr_b(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vrotr_b(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vrotr_h(a: m128i, b: 
m128i) -> m128i { unsafe { transmute(__lsx_vrotr_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vrotr_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vrotr_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vrotr_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vrotr_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vadd_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vadd_q(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vsub_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsub_q(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3959,7 +3959,7 @@ pub unsafe fn lsx_vldrepl_b(mem_addr: *const i8) -> m128i { transmute(__lsx_vldrepl_b(mem_addr, IMM_S12)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3968,7 +3968,7 @@ pub unsafe fn lsx_vldrepl_h(mem_addr: *const i8) -> m128i { transmute(__lsx_vldrepl_h(mem_addr, IMM_S11)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3977,7 +3977,7 @@ pub unsafe fn lsx_vldrepl_w(mem_addr: *const i8) -> m128i { transmute(__lsx_vldrepl_w(mem_addr, IMM_S10)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] 
#[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -3986,77 +3986,77 @@ pub unsafe fn lsx_vldrepl_d(mem_addr: *const i8) -> m128i { transmute(__lsx_vldrepl_d(mem_addr, IMM_S9)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmskgez_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vmskgez_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vmsknz_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vmsknz_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_h_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_h_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_w_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_w_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_d_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_d_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_q_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_q_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_hu_bu(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_hu_bu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_wu_hu(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_wu_hu(transmute(a))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_du_wu(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_du_wu(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vexth_qu_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vexth_qu_du(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4065,7 +4065,7 @@ pub fn lsx_vrotri_b(a: m128i) -> m128i { unsafe { transmute(__lsx_vrotri_b(transmute(a), IMM3)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4074,7 +4074,7 @@ pub fn lsx_vrotri_h(a: m128i) -> m128i { unsafe { transmute(__lsx_vrotri_h(transmute(a), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4083,7 +4083,7 @@ pub fn lsx_vrotri_w(a: m128i) -> m128i { unsafe { transmute(__lsx_vrotri_w(transmute(a), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4092,14 +4092,14 @@ pub fn lsx_vrotri_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vrotri_d(transmute(a), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vextl_q_d(a: m128i) -> m128i { unsafe { transmute(__lsx_vextl_q_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4108,7 +4108,7 @@ pub fn 
lsx_vsrlni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4117,7 +4117,7 @@ pub fn lsx_vsrlni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4126,7 +4126,7 @@ pub fn lsx_vsrlni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4135,7 +4135,7 @@ pub fn lsx_vsrlni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4144,7 +4144,7 @@ pub fn lsx_vsrlrni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4153,7 +4153,7 @@ pub fn lsx_vsrlrni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4162,7 +4162,7 @@ pub fn lsx_vsrlrni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] 
+#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4171,7 +4171,7 @@ pub fn lsx_vsrlrni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrlrni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4180,7 +4180,7 @@ pub fn lsx_vssrlni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4189,7 +4189,7 @@ pub fn lsx_vssrlni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4198,7 +4198,7 @@ pub fn lsx_vssrlni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4207,7 +4207,7 @@ pub fn lsx_vssrlni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4216,7 +4216,7 @@ pub fn lsx_vssrlni_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] @@ -4225,7 +4225,7 @@ pub fn lsx_vssrlni_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4234,7 +4234,7 @@ pub fn lsx_vssrlni_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4243,7 +4243,7 @@ pub fn lsx_vssrlni_du_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4252,7 +4252,7 @@ pub fn lsx_vssrlrni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4261,7 +4261,7 @@ pub fn lsx_vssrlrni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4270,7 +4270,7 @@ pub fn lsx_vssrlrni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4279,7 +4279,7 @@ pub fn lsx_vssrlrni_d_q(a: m128i, b: m128i) -> m128i { unsafe { 
transmute(__lsx_vssrlrni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4288,7 +4288,7 @@ pub fn lsx_vssrlrni_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4297,7 +4297,7 @@ pub fn lsx_vssrlrni_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4306,7 +4306,7 @@ pub fn lsx_vssrlrni_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4315,7 +4315,7 @@ pub fn lsx_vssrlrni_du_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4324,7 +4324,7 @@ pub fn lsx_vsrani_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrani_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4333,7 +4333,7 @@ pub fn lsx_vsrani_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrani_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable 
= "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4342,7 +4342,7 @@ pub fn lsx_vsrani_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrani_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4351,7 +4351,7 @@ pub fn lsx_vsrani_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrani_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4360,7 +4360,7 @@ pub fn lsx_vsrarni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4369,7 +4369,7 @@ pub fn lsx_vsrarni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4378,7 +4378,7 @@ pub fn lsx_vsrarni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4387,7 +4387,7 @@ pub fn lsx_vsrarni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vsrarni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4396,7 +4396,7 @@ pub fn 
lsx_vssrani_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4405,7 +4405,7 @@ pub fn lsx_vssrani_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4414,7 +4414,7 @@ pub fn lsx_vssrani_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4423,7 +4423,7 @@ pub fn lsx_vssrani_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4432,7 +4432,7 @@ pub fn lsx_vssrani_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4441,7 +4441,7 @@ pub fn lsx_vssrani_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4450,7 +4450,7 @@ pub fn lsx_vssrani_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_wu_d(transmute(a), transmute(b), IMM6)) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4459,7 +4459,7 @@ pub fn lsx_vssrani_du_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrani_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4468,7 +4468,7 @@ pub fn lsx_vssrarni_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_b_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4477,7 +4477,7 @@ pub fn lsx_vssrarni_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_h_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4486,7 +4486,7 @@ pub fn lsx_vssrarni_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_w_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4495,7 +4495,7 @@ pub fn lsx_vssrarni_d_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_d_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4504,7 +4504,7 @@ pub fn lsx_vssrarni_bu_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_bu_h(transmute(a), transmute(b), IMM4)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = 
"stdarch_loongarch", issue = "117427")] @@ -4513,7 +4513,7 @@ pub fn lsx_vssrarni_hu_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_hu_w(transmute(a), transmute(b), IMM5)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4522,7 +4522,7 @@ pub fn lsx_vssrarni_wu_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_wu_d(transmute(a), transmute(b), IMM6)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4531,7 +4531,7 @@ pub fn lsx_vssrarni_du_q(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrarni_du_q(transmute(a), transmute(b), IMM7)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4540,7 +4540,7 @@ pub fn lsx_vpermi_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vpermi_w(transmute(a), transmute(b), IMM8)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4549,7 +4549,7 @@ pub unsafe fn lsx_vld(mem_addr: *const i8) -> m128i { transmute(__lsx_vld(mem_addr, IMM_S12)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(2)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4558,49 +4558,49 @@ pub unsafe fn lsx_vst(a: m128i, mem_addr: *mut i8) { __lsx_vst(transmute(a), mem_addr, IMM_S12) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = 
"lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrlrn_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrlrn_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_b_h(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_b_h(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_h_w(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_h_w(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vssrln_w_d(a: m128i, b: m128i) -> m128i { unsafe { transmute(__lsx_vssrln_w_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -4609,406 +4609,406 @@ pub fn lsx_vldi() -> m128i { unsafe { transmute(__lsx_vldi(IMM_S13)) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vshuf_b(a: m128i, b: m128i, c: m128i) -> m128i { unsafe { transmute(__lsx_vshuf_b(transmute(a), transmute(b), transmute(c))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lsx_vldx(mem_addr: *const i8, b: i64) -> m128i { transmute(__lsx_vldx(mem_addr, transmute(b))) } -#[inline(always)] +#[inline] #[target_feature(enable = 
"lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub unsafe fn lsx_vstx(a: m128i, mem_addr: *mut i8, b: i64) { __lsx_vstx(transmute(a), mem_addr, transmute(b)) } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vextl_qu_du(a: m128i) -> m128i { unsafe { transmute(__lsx_vextl_qu_du(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_b(a: m128i) -> i32 { unsafe { transmute(__lsx_bnz_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_d(a: m128i) -> i32 { unsafe { transmute(__lsx_bnz_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_h(a: m128i) -> i32 { unsafe { transmute(__lsx_bnz_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_v(a: m128i) -> i32 { unsafe { transmute(__lsx_bnz_v(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bnz_w(a: m128i) -> i32 { unsafe { transmute(__lsx_bnz_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bz_b(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_b(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bz_d(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_d(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = 
"117427")] pub fn lsx_bz_h(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_h(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bz_v(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_v(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_bz_w(a: m128i) -> i32 { unsafe { transmute(__lsx_bz_w(transmute(a))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_caf_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_caf_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_caf_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_caf_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_ceq_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_ceq_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_ceq_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_ceq_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cle_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cle_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cle_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cle_s(transmute(a), transmute(b))) } 
} -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_clt_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_clt_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_clt_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_clt_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cne_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cne_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cne_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cne_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cor_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cor_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cor_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cor_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cueq_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cueq_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cueq_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cueq_s(transmute(a), transmute(b))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cule_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cule_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cule_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cule_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cult_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cult_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cult_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cult_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cun_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cun_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cune_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_cune_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cune_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cune_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_cun_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_cun_s(transmute(a), transmute(b))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_saf_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_saf_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_saf_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_saf_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_seq_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_seq_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_seq_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_seq_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sle_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sle_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sle_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sle_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_slt_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_slt_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_slt_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_slt_s(transmute(a), transmute(b))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sne_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sne_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sne_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sne_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sor_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sor_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sor_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sor_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sueq_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sueq_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sueq_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sueq_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sule_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sule_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sule_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sule_s(transmute(a), transmute(b))) } } 
-#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sult_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sult_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sult_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sult_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sun_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sun_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sune_d(a: m128d, b: m128d) -> m128i { unsafe { transmute(__lsx_vfcmp_sune_d(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sune_s(a: m128, b: m128) -> m128i { unsafe { transmute(__lsx_vfcmp_sune_s(transmute(a), transmute(b))) } } -#[inline(always)] +#[inline] #[target_feature(enable = "lsx")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn lsx_vfcmp_sun_s(a: m128, b: m128) -> m128i { diff --git a/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs b/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs index b2a67fb609974..948c98df61971 100644 --- a/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs +++ b/library/stdarch/crates/core_arch/src/loongarch_shared/mod.rs @@ -210,7 +210,7 @@ pub unsafe fn syscall() { } /// Calculate the approximate single-precision result of 1.0 divided -#[inline(always)] +#[inline] #[target_feature(enable = "frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn 
frecipe_s(a: f32) -> f32 { @@ -218,7 +218,7 @@ pub fn frecipe_s(a: f32) -> f32 { } /// Calculate the approximate double-precision result of 1.0 divided -#[inline(always)] +#[inline] #[target_feature(enable = "frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn frecipe_d(a: f64) -> f64 { @@ -226,7 +226,7 @@ pub fn frecipe_d(a: f64) -> f64 { } /// Calculate the approximate single-precision result of dividing 1.0 by the square root -#[inline(always)] +#[inline] #[target_feature(enable = "frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn frsqrte_s(a: f32) -> f32 { @@ -234,7 +234,7 @@ pub fn frsqrte_s(a: f32) -> f32 { } /// Calculate the approximate double-precision result of dividing 1.0 by the square root -#[inline(always)] +#[inline] #[target_feature(enable = "frecipe")] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn frsqrte_d(a: f64) -> f64 { diff --git a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs index 8bdee521a3e86..8c4d4e13ec15a 100644 --- a/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs +++ b/library/stdarch/crates/stdarch-gen-loongarch/src/main.rs @@ -618,7 +618,7 @@ fn gen_bind_body( let function = if !rustc_legacy_const_generics.is_empty() { format!( r#" -#[inline(always)]{target_feature} +#[inline]{target_feature} #[{rustc_legacy_const_generics}] #[unstable(feature = "stdarch_loongarch", issue = "117427")] {fn_decl}{{ @@ -630,7 +630,7 @@ fn gen_bind_body( } else { format!( r#" -#[inline(always)]{target_feature} +#[inline]{target_feature} #[unstable(feature = "stdarch_loongarch", issue = "117427")] {fn_decl}{{ {call_params} From a854325c91a19495cfb4bcee6fe33ce22968cb67 Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Wed, 6 May 2026 16:39:21 +0100 Subject: [PATCH 29/30] remove `target_feature_inline_always` from lib.rs --- library/stdarch/crates/core_arch/src/lib.rs | 1 - 1 file changed, 1 
deletion(-) diff --git a/library/stdarch/crates/core_arch/src/lib.rs b/library/stdarch/crates/core_arch/src/lib.rs index 58a6849ad55df..b9d63b44975ab 100644 --- a/library/stdarch/crates/core_arch/src/lib.rs +++ b/library/stdarch/crates/core_arch/src/lib.rs @@ -33,7 +33,6 @@ x86_amx_intrinsics, f16, aarch64_unstable_target_feature, - target_feature_inline_always, funnel_shifts, avx10_target_feature, const_trait_impl, From 4c3bde6036f376493d22a9e047d73878a5eb38db Mon Sep 17 00:00:00 2001 From: James Barford-Evans Date: Wed, 6 May 2026 16:53:04 +0100 Subject: [PATCH 30/30] replace more instances of `#[inline(always)]` --- .../crates/core_arch/src/loongarch64/simd.rs | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/library/stdarch/crates/core_arch/src/loongarch64/simd.rs b/library/stdarch/crates/core_arch/src/loongarch64/simd.rs index 2dedfa7eb90e0..959e2d0cb68f5 100644 --- a/library/stdarch/crates/core_arch/src/loongarch64/simd.rs +++ b/library/stdarch/crates/core_arch/src/loongarch64/simd.rs @@ -121,7 +121,7 @@ pub(super) const unsafe fn simd_splat(a: i64) -> T { macro_rules! impl_vv { ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ty) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn $name(a: $oty) -> $oty { @@ -138,7 +138,7 @@ pub(super) use impl_vv; macro_rules! impl_gv { ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $gty:ty) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn $name(a: $gty) -> $oty { @@ -154,7 +154,7 @@ pub(super) use impl_gv; macro_rules! 
impl_sv { ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $ibs:expr) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(0)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -172,7 +172,7 @@ pub(super) use impl_sv; macro_rules! impl_vvv { ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ty) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn $name(a: $oty, b: $oty) -> $oty { @@ -190,7 +190,7 @@ pub(super) use impl_vvv; macro_rules! impl_vuv { ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -205,7 +205,7 @@ macro_rules! impl_vuv { } }; ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $ibs:expr) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -225,7 +225,7 @@ pub(super) use impl_vuv; macro_rules! impl_vug { ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $gty:ty, $ibs:expr) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -244,7 +244,7 @@ pub(super) use impl_vug; macro_rules! impl_vsv { ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $ibs:expr) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] @@ -264,7 +264,7 @@ pub(super) use impl_vsv; macro_rules! 
impl_vvvv { ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ty) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[unstable(feature = "stdarch_loongarch", issue = "117427")] pub fn $name(a: $oty, b: $oty, c: $oty) -> $oty { @@ -283,7 +283,7 @@ pub(super) use impl_vvvv; macro_rules! impl_vugv { ($ft:literal, $name:ident, $op:path, $oty:ty, $ity:ident, $gty:ty, $ibs:expr) => { - #[inline(always)] + #[inline] #[target_feature(enable = $ft)] #[rustc_legacy_const_generics(1)] #[unstable(feature = "stdarch_loongarch", issue = "117427")]